| author | Dave Airlie <airlied@redhat.com> | 2016-07-28 05:51:39 +1000 |
| --- | --- | --- |
| committer | Dave Airlie <airlied@redhat.com> | 2016-07-28 05:51:39 +1000 |
| commit | 162b20d2f9ef9dfa833cc329b1e8957beb672349 (patch) | |
| tree | 5d043eae378bfb89d6bb9b2018c28537aa28387a /drivers | |
| parent | c3f8d8645ea56e57cbd35ea20a7655be512efaa9 (diff) | |
| parent | 5ef8292925047ad290205af5f3ae13d0a36d774d (diff) | |
Merge branch 'drm-next-4.8' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more patches for 4.8, mostly bug fixes and some prep work
for Iceland powerplay support. I have a couple of Polaris patches and
Edward's misc cleanups that require a merge with Linus' tree; I don't
know if you are planning a merge anytime soon.
[airlied: fixed up endian vs 32-bit change in ppatomctrl]
* 'drm-next-4.8' of git://people.freedesktop.org/~agd5f/linux: (26 commits)
drm/amdgpu: comment out unused defaults_bonaire_pro static const structures to fix the build
drm/amdgpu: temporary comment out unused static const structures to fix the build
drm/amdgpu: S3 resume fail on Polaris10
drm/amd/powerplay: add pp_tables_get_response_times function in process pptables
drm/amd/powerplay: fix the incorrect return value
drm/amd/powerplay: add atomctrl_get_voltage_evv function in ppatomctrl
drm/amdgpu: add new definitions into ppsmc.h for iceland
drm/amd/powerplay: add SMU register macro for future use
drm/amdgpu: add ucode_start_address into cgs_firmware_info
drm/amdgpu: no need load microcode at sdma if powerplay is enabled
drm/amdgpu: rename smumgr to smum for dpm
drm/amdgpu: disable GFX PG on CZ/BR/ST
drivers: gpu: drm: amd: powerplay: hwmgr: Remove unused variable
drm/amdgpu: return -ENOSPC when running out of UVD handles
drm/amdgpu: trace need_flush in grab_vm as well
drm/amdgpu: always signal all fences
drm/amdgpu: check flush fence context instead of same ring v2
drm/radeon: support backlight control for UNIPHY3
drm/amdgpu: support backlight control for UNIPHY3
drm/amdgpu: remove usec timeout loop from IB tests
...
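The [airlied] fixup noted in the message above concerns byte order in ppatomctrl.c: the ATOM BIOS parameter blocks are little-endian, so the powerplay changes in this merge wrap values with cpu_to_le16()/cpu_to_le32() before command-table calls and with le16_to_cpu()/le32_to_cpu() when reading results back, instead of using them raw. Below is a minimal userspace sketch of that pattern, not the driver code itself; it uses the glibc <endian.h> helpers in place of the kernel macros, and the struct, field names and mask value are invented for illustration.

```c
/*
 * Userspace sketch of the endianness discipline used in ppatomctrl.c:
 * convert to little-endian before handing a parameter block to the
 * (little-endian) ATOM tables, convert back when reading results.
 * The struct and mask below are illustrative, not the real definitions.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for an ATOM command-table parameter block. */
struct atom_clock_params {
	uint32_t target_clock_le;	/* stored little-endian */
	uint16_t voltage_level_le;	/* stored little-endian */
};

#define CLOCK_FREQ_MASK 0x00ffffffu	/* low 24 bits carry the clock here */

int main(void)
{
	uint32_t engine_clock = 0x00123456;	/* CPU-endian input */
	struct atom_clock_params p;

	/* Convert to little-endian before the (simulated) table call... */
	p.target_clock_le = htole32(engine_clock & CLOCK_FREQ_MASK);
	p.voltage_level_le = htole16(0xff01);

	/* ...and back to CPU byte order when reading results out of it. */
	printf("clock: 0x%08x  voltage id: 0x%04x\n",
	       (unsigned int)le32toh(p.target_clock_le),
	       (unsigned int)le16toh(p.voltage_level_le));
	return 0;
}
```

On a little-endian host these conversions are no-ops, which is why the old raw assignments appeared fine there; on a big-endian machine they are what keeps the values from being byte-swapped incorrectly.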
Diffstat (limited to 'drivers')
30 files changed, 469 insertions, 360 deletions
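One of the behavioural changes in the amdgpu_vce.c hunks below replaces the command-stream parser's created/allocated/destroyed booleans with per-session bitmasks, so that on success only the slots that were actually destroyed are released, and on a parse error only the slots freshly allocated by this submission are rolled back. The following standalone sketch shows that bookkeeping idea only; the command encoding, slot count and helper names are made up and are not the driver's.

```c
/*
 * Standalone sketch (not the driver code) of per-slot bitmask bookkeeping:
 * bits in 'allocated', 'created' and 'destroyed' record what happened to
 * each handle slot, so cleanup can free exactly the right set afterwards.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16			/* stand-in for AMDGPU_MAX_VCE_HANDLES */

static uint32_t handles[MAX_HANDLES];	/* 0 means the slot is free */

static void release_slots(uint32_t mask)
{
	for (int i = 0; i < MAX_HANDLES; ++i)
		if (mask & (1u << i))
			handles[i] = 0;
}

/* Toy parser: each command is (opcode << 8) | slot.  Returns 0 or < 0. */
static int parse(const uint32_t *cmds, int n)
{
	uint32_t allocated = 0, created = 0, destroyed = 0;
	int r = 0;

	for (int i = 0; i < n && !r; ++i) {
		int slot = cmds[i] & 0xff;

		if (slot >= MAX_HANDLES) {
			r = -1;			/* bogus slot index */
			break;
		}

		switch (cmds[i] >> 8) {
		case 0x00:			/* session: claim a free slot */
			if (!handles[slot]) {
				handles[slot] = 0xff00u | slot;
				allocated |= 1u << slot;
			}
			break;
		case 0x01:			/* create */
			created |= 1u << slot;
			break;
		case 0x02:			/* destroy */
			destroyed |= 1u << slot;
			break;
		default:
			r = -1;			/* unknown command */
		}
	}

	if (!r && (allocated & ~created))
		r = -2;				/* allocated but never created */

	/* Success: free what was destroyed.  Error: undo the allocations. */
	release_slots(r ? allocated : destroyed);
	return r;
}

int main(void)
{
	/* session on slot 3, create it, destroy it */
	const uint32_t good[] = { 0x0003, 0x0103, 0x0203 };

	printf("parse() = %d, slot 3 afterwards = %u\n",
	       parse(good, 3), (unsigned int)handles[3]);
	return 0;
}
```

Compiled with any C99 compiler, the toy stream creates and then destroys slot 3, so after parsing the slot reads back as free.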
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 5556ce9..ee95e95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -752,6 +752,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, if (!adev->pm.fw) { switch (adev->asic_type) { + case CHIP_TOPAZ: + strcpy(fw_name, "amdgpu/topaz_smc.bin"); + break; case CHIP_TONGA: strcpy(fw_name, "amdgpu/tonga_smc.bin"); break; @@ -800,6 +803,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, info->version = adev->pm.fw_version; info->image_size = ucode_size; + info->ucode_start_address = ucode_start_address; info->kptr = (void *)src; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index d155876..0b109ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -204,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) if (seq != ring->fence_drv.sync_seq) amdgpu_fence_schedule_fallback(ring); - while (last_seq != seq) { + if (unlikely(seq == last_seq)) + return; + + last_seq &= drv->num_fences_mask; + seq &= drv->num_fences_mask; + + do { struct fence *fence, **ptr; - ptr = &drv->fences[++last_seq & drv->num_fences_mask]; + ++last_seq; + last_seq &= drv->num_fences_mask; + ptr = &drv->fences[last_seq]; /* There is always exactly one thread signaling this fence slot */ fence = rcu_dereference_protected(*ptr, 1); RCU_INIT_POINTER(*ptr, NULL); - BUG_ON(!fence); + if (!fence) + continue; r = fence_signal(fence); if (!r) @@ -222,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) BUG(); fence_put(fence); - } + } while (last_seq != seq); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 46c3097..428ebf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -122,7 +122,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, bool skip_preamble, need_ctx_switch; unsigned patch_offset = ~0; struct amdgpu_vm *vm; - struct fence *hwf; uint64_t ctx; unsigned i; @@ -190,7 +189,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->emit_hdp_invalidate) amdgpu_ring_emit_hdp_invalidate(ring); - r = amdgpu_fence_emit(ring, &hwf); + r = amdgpu_fence_emit(ring, f); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vm_id) @@ -205,9 +204,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, AMDGPU_FENCE_FLAG_64BIT); } - if (f) - *f = fence_get(hwf); - if (patch_offset != ~0 && ring->funcs->patch_cond_exec) amdgpu_ring_patch_cond_exec(ring, patch_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index aaee0c8..6674d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -172,15 +172,13 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) trace_amdgpu_sched_run_job(job); r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->sync.last_vm_update, job, &fence); - if (r) { + if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); - goto err; - } -err: /* if gpu reset, hw fence will be replaced here */ fence_put(job->fence); - job->fence = fence; + job->fence = fence_get(fence); + amdgpu_job_free_resources(job); return fence; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 499803f..0d8d65e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -149,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job, TRACE_EVENT(amdgpu_vm_grab_id, - TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid, - uint64_t pd_addr), - TP_ARGS(vm, ring, vmid, pd_addr), + TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job), + TP_ARGS(vm, ring, job), TP_STRUCT__entry( __field(struct amdgpu_vm *, vm) __field(u32, ring) __field(u32, vmid) __field(u64, pd_addr) + __field(u32, needs_flush) ), TP_fast_assign( __entry->vm = vm; __entry->ring = ring; - __entry->vmid = vmid; - __entry->pd_addr = pd_addr; + __entry->vmid = job->vm_id; + __entry->pd_addr = job->vm_pd_addr; + __entry->needs_flush = job->vm_needs_flush; ), - TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm, - __entry->ring, __entry->vmid, __entry->pd_addr) + TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u", + __entry->vm, __entry->ring, __entry->vmid, + __entry->pd_addr, __entry->needs_flush) ); TRACE_EVENT(amdgpu_vm_bo_map, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index d9c88d13..a46a64c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -40,7 +40,7 @@ #include "uvd/uvd_4_2_d.h" /* 1 second timeout */ -#define UVD_IDLE_TIMEOUT_MS 1000 +#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000) /* Polaris10/11 firmware version */ #define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8)) @@ -662,7 +662,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, } DRM_ERROR("No more free UVD handles!\n"); - return -EINVAL; + return -ENOSPC; case 1: /* it's a decode msg, calc buffer sizes */ @@ -968,7 +968,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (direct) { r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = f; + job->fence = fence_get(f); if (r) goto err_free; @@ -1114,8 +1114,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) amdgpu_asic_set_uvd_clocks(adev, 0, 0); } } else { - schedule_delayed_work(&adev->uvd.idle_work, - msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); + schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); } } @@ -1123,7 +1122,7 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) { bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); set_clocks &= schedule_delayed_work(&adev->uvd.idle_work, - msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); + UVD_IDLE_TIMEOUT); if (set_clocks) { if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 875626a..aeeeb72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -36,7 +36,7 @@ #include "cikd.h" /* 1 second timeout */ -#define VCE_IDLE_TIMEOUT_MS 1000 +#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000) /* Firmware Names */ #ifdef CONFIG_DRM_AMDGPU_CIK @@ -310,8 +310,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) amdgpu_asic_set_vce_clocks(adev, 0, 0); } } else { - schedule_delayed_work(&adev->vce.idle_work, - msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); + schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT); } } @@ -324,17 +323,12 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) */ static void amdgpu_vce_note_usage(struct amdgpu_device *adev) { - bool streams_changed = false; 
bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); - set_clocks &= schedule_delayed_work(&adev->vce.idle_work, - msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); - if (adev->pm.dpm_enabled) { - /* XXX figure out if the streams changed */ - streams_changed = false; - } + set_clocks &= schedule_delayed_work(&adev->vce.idle_work, + VCE_IDLE_TIMEOUT); - if (set_clocks || streams_changed) { + if (set_clocks) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_vce(adev, true); } else { @@ -357,6 +351,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) int i, r; for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { uint32_t handle = atomic_read(&adev->vce.handles[i]); + if (!handle || adev->vce.filp[i] != filp) continue; @@ -437,7 +432,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = f; + job->fence = fence_get(f); if (r) goto err; @@ -499,7 +494,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, if (direct) { r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = f; + job->fence = fence_get(f); if (r) goto err; @@ -580,12 +575,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, * we we don't have another free session index. */ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, - uint32_t handle, bool *allocated) + uint32_t handle, uint32_t *allocated) { unsigned i; - *allocated = false; - /* validate the handle */ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { if (atomic_read(&p->adev->vce.handles[i]) == handle) { @@ -602,7 +595,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) { p->adev->vce.filp[i] = p->filp; p->adev->vce.img_size[i] = 0; - *allocated = true; + *allocated |= 1 << i; return i; } } @@ -622,9 +615,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; unsigned fb_idx = 0, bs_idx = 0; int session_idx = -1; - bool destroyed = false; - bool created = false; - bool allocated = false; + uint32_t destroyed = 0; + uint32_t created = 0; + uint32_t allocated = 0; uint32_t tmp, handle = 0; uint32_t *size = &tmp; int i, r = 0, idx = 0; @@ -641,30 +634,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; } - if (destroyed) { - DRM_ERROR("No other command allowed after destroy!\n"); - r = -EINVAL; - goto out; - } - switch (cmd) { - case 0x00000001: // session + case 0x00000001: /* session */ handle = amdgpu_get_ib_value(p, ib_idx, idx + 2); session_idx = amdgpu_vce_validate_handle(p, handle, &allocated); - if (session_idx < 0) - return session_idx; + if (session_idx < 0) { + r = session_idx; + goto out; + } size = &p->adev->vce.img_size[session_idx]; break; - case 0x00000002: // task info + case 0x00000002: /* task info */ fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6); bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7); break; - case 0x01000001: // create - created = true; - if (!allocated) { + case 0x01000001: /* create */ + created |= 1 << session_idx; + if (destroyed & (1 << session_idx)) { + destroyed &= ~(1 << session_idx); + allocated |= 1 << session_idx; + + } else if (!(allocated & (1 << session_idx))) { DRM_ERROR("Handle already in use!\n"); r = -EINVAL; goto out; @@ -675,16 +668,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) 8 * 3 / 2; break; 
- case 0x04000001: // config extension - case 0x04000002: // pic control - case 0x04000005: // rate control - case 0x04000007: // motion estimation - case 0x04000008: // rdo - case 0x04000009: // vui - case 0x05000002: // auxiliary buffer + case 0x04000001: /* config extension */ + case 0x04000002: /* pic control */ + case 0x04000005: /* rate control */ + case 0x04000007: /* motion estimation */ + case 0x04000008: /* rdo */ + case 0x04000009: /* vui */ + case 0x05000002: /* auxiliary buffer */ break; - case 0x03000001: // encode + case 0x03000001: /* encode */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9, *size, 0); if (r) @@ -696,18 +689,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; break; - case 0x02000001: // destroy - destroyed = true; + case 0x02000001: /* destroy */ + destroyed |= 1 << session_idx; break; - case 0x05000001: // context buffer + case 0x05000001: /* context buffer */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, *size * 2, 0); if (r) goto out; break; - case 0x05000004: // video bitstream buffer + case 0x05000004: /* video bitstream buffer */ tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4); r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, tmp, bs_idx); @@ -715,7 +708,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; break; - case 0x05000005: // feedback buffer + case 0x05000005: /* feedback buffer */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, 4096, fb_idx); if (r) @@ -737,21 +730,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) idx += len / 4; } - if (allocated && !created) { + if (allocated & ~created) { DRM_ERROR("New session without create command!\n"); r = -ENOENT; } out: - if ((!r && destroyed) || (r && allocated)) { - /* - * IB contains a destroy msg or we have allocated an - * handle and got an error, anyway free the handle - */ - for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) - atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0); + if (!r) { + /* No error, free all destroyed handle slots */ + tmp = destroyed; + } else { + /* Error during parsing, free all allocated handle slots */ + tmp = allocated; } + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) + if (tmp & (1 << i)) + atomic_set(&p->adev->vce.handles[i], 0); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2f8496d..8e642fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -195,6 +195,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job) { struct amdgpu_device *adev = ring->adev; + uint64_t fence_context = adev->fence_context + ring->idx; struct fence *updates = sync->last_vm_update; struct amdgpu_vm_id *id, *idle; struct fence **fences; @@ -254,7 +255,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, i = ring->idx; do { struct fence *flushed; - bool same_ring = ring->idx == i; id = vm->ids[i++]; if (i == AMDGPU_MAX_RINGS) @@ -272,8 +272,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (job->vm_pd_addr != id->pd_gpu_addr) continue; - if (!same_ring && - (!id->last_flush || !fence_is_signaled(id->last_flush))) + if (!id->last_flush) + continue; + + if (id->last_flush->context != fence_context && + !fence_is_signaled(id->last_flush)) continue; flushed = id->flushed_updates; @@ -294,7 +297,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 
job->vm_id = id - adev->vm_manager.ids; job->vm_needs_flush = false; - trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr); + trace_amdgpu_vm_grab_id(vm, ring->idx, job); mutex_unlock(&adev->vm_manager.lock); return 0; @@ -325,7 +328,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, vm->ids[ring->idx] = id; job->vm_id = id - adev->vm_manager.ids; - trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr); + trace_amdgpu_vm_grab_id(vm, ring->idx, job); error: mutex_unlock(&adev->vm_manager.lock); diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 48b6bd6..c32eca2 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: if (dig->backlight_level == 0) amdgpu_atombios_encoder_setup_dig_transmitter(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 5c33ed8..573bc97 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -86,12 +86,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt = { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }; +#if 0 static const struct ci_pt_defaults defaults_bonaire_pro = { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } }; +#endif static const struct ci_pt_defaults defaults_saturn_xt = { diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 46aca16..6507a7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -622,7 +622,6 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; int r; u32 tmp = 0; @@ -644,7 +643,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) goto err0; } - ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); ib.ptr[1] = lower_32_bits(gpu_addr); ib.ptr[2] = upper_32_bits(gpu_addr); ib.ptr[3] = 1; @@ -659,23 +659,15 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto err1; } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); - goto err1; + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index f6bd946..1ac5ad0 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2112,7 +2112,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; - unsigned i; int r; r = amdgpu_gfx_scratch_get(adev, &scratch); @@ -2141,16 +2140,9 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto err2; } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); - goto err2; + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); @@ -2158,7 +2150,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) } err2: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err1: diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index aabaf2f..d97b962 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -794,7 +794,6 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; - unsigned i; int r; r = amdgpu_gfx_scratch_get(adev, &scratch); @@ -823,23 +822,15 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto err2; } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); - goto err2; + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } err2: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err1: @@ -1729,7 +1720,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) RREG32(sec_ded_counter_registers[i]); fail: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 825ccd6..2f078ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c @@ -24,7 +24,7 @@ #include <linux/firmware.h> #include "drmP.h" #include "amdgpu.h" -#include "iceland_smumgr.h" +#include "iceland_smum.h" MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index 52ee081..5285712 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -25,7 +25,7 @@ #include "drmP.h" #include "amdgpu.h" #include "ppsmc.h" -#include "iceland_smumgr.h" +#include "iceland_smum.h" #include "smu_ucode_xfer_vi.h" #include "amdgpu_ucode.h" diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h index 1e0769e..1e0769e 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 5a0e245..a845e88 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ 
b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -191,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, vid_mapping_table->num_entries = i; } +#if 0 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = { { 0, 4, 1 }, @@ -289,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] = { { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } }; +#endif static const struct kv_pt_config_reg didt_config_kv[] = { diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h index 7837f2e..8463245 100644 --- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h +++ b/drivers/gpu/drm/amd/amdgpu/ppsmc.h @@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result; #define PPSMC_StartFanControl ((uint8_t)0x5B) #define PPSMC_StopFanControl ((uint8_t)0x5C) #define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) +#define PPSMC_NoDisplay ((uint8_t)0x5D) #define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) +#define PPSMC_HasDisplay ((uint8_t)0x5E) #define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) #define PPSMC_MSG_UVDPowerON ((uint8_t)0x61) #define PPSMC_MSG_EnableULV ((uint8_t)0x62) @@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_DisableDTE ((uint8_t)0x88) #define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) #define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) +#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) /* CI/KV/KB */ #define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) @@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) #define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) #define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) #define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) #define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index ac3730a..0111d15 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -567,19 +567,21 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) { int r; - if (!adev->firmware.smu_load) { - r = sdma_v2_4_load_microcode(adev); - if (r) - return r; - } else { - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_SDMA0); - if (r) - return -EINVAL; - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_SDMA1); - if (r) - return -EINVAL; + if (!adev->pp_enabled) { + if (!adev->firmware.smu_load) { + r = sdma_v2_4_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_SDMA0); + if (r) + return -EINVAL; + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_SDMA1); + if (r) + return -EINVAL; + } } /* halt the engine before programing */ @@ -671,7 +673,6 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; int r; u32 tmp = 0; @@ -713,23 +714,15 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto err1; } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); - goto err1; + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 
0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index f00db6f..2b8ce58 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -901,7 +901,6 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; int r; u32 tmp = 0; @@ -943,22 +942,14 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto err1; } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); - goto err1; + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 30e8099..d7b8da4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -43,6 +43,7 @@ #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 +#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 #define VCE_V3_0_FW_SIZE (384 * 1024) #define VCE_V3_0_STACK_SIZE (64 * 1024) @@ -51,6 +52,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); +static int vce_v3_0_wait_for_idle(void *handle); /** * vce_v3_0_ring_get_rptr - get read pointer @@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, vce_v3_0_override_vce_clock_gating(adev, false); } +static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev) +{ + int i, j; + uint32_t status = 0; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmVCE_STATUS); + if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) + return 0; + mdelay(10); + } + + DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + } + + return -ETIMEDOUT; +} + /** * vce_v3_0_start - start VCE block * @@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, static int vce_v3_0_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - int idx, i, j, r; + int idx, r; + + ring = &adev->vce.ring[0]; + WREG32(mmVCE_RB_RPTR, ring->wptr); + WREG32(mmVCE_RB_WPTR, ring->wptr); + WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); + WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); + + ring = &adev->vce.ring[1]; + WREG32(mmVCE_RB_RPTR2, ring->wptr); + WREG32(mmVCE_RB_WPTR2, ring->wptr); + WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); + 
WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); mutex_lock(&adev->grbm_idx_mutex); for (idx = 0; idx < 2; ++idx) { - if (adev->vce.harvest_config & (1 << idx)) continue; @@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev) vce_v3_0_mc_resume(adev, idx); - /* set BUSY flag */ - WREG32_P(mmVCE_STATUS, 1, ~1); + WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK, + ~VCE_STATUS__JOB_BUSY_MASK); + if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); else WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - - mdelay(100); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - for (i = 0; i < 10; ++i) { - uint32_t status; - for (j = 0; j < 100; ++j) { - status = RREG32(mmVCE_STATUS); - if (status & 2) - break; - mdelay(10); - } - r = 0; - if (status & 2) - break; - - DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - r = -1; - } + mdelay(100); + + r = vce_v3_0_firmware_loaded(adev); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~1); + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); /* Set Clock-Gating off */ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) @@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev) WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); mutex_unlock(&adev->grbm_idx_mutex); - ring = &adev->vce.ring[0]; - WREG32(mmVCE_RB_RPTR, ring->wptr); - WREG32(mmVCE_RB_WPTR, ring->wptr); - WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); - WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); - WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); + return 0; +} - ring = &adev->vce.ring[1]; - WREG32(mmVCE_RB_RPTR2, ring->wptr); - WREG32(mmVCE_RB_WPTR2, ring->wptr); - WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); - WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); - WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); +static int vce_v3_0_stop(struct amdgpu_device *adev) +{ + int idx; + + mutex_lock(&adev->grbm_idx_mutex); + for (idx = 0; idx < 2; ++idx) { + if (adev->vce.harvest_config & (1 << idx)) + continue; + + if (idx == 0) + WREG32_P(mmGRBM_GFX_INDEX, 0, + ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + else + WREG32_P(mmGRBM_GFX_INDEX, + GRBM_GFX_INDEX__VCE_INSTANCE_MASK, + ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + + if (adev->asic_type >= CHIP_STONEY) + WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); + else + WREG32_P(mmVCE_VCPU_CNTL, 0, + ~VCE_VCPU_CNTL__CLK_EN_MASK); + /* hold on ECPU */ + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + /* clear BUSY flag */ + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + + /* Set Clock-Gating off */ + if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) + vce_v3_0_set_vce_sw_clock_gating(adev, false); + } + + WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + mutex_unlock(&adev->grbm_idx_mutex); return 0; } @@ -441,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle) static int vce_v3_0_hw_fini(void *handle) { - return 0; + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = vce_v3_0_wait_for_idle(handle); + if (r) + return r; + + return 
vce_v3_0_stop(adev); } static int vce_v3_0_suspend(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index cda7def..03a31c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1249,15 +1249,7 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; - /* rev0 hardware doesn't support PG */ adev->pg_flags = 0; - if (adev->rev_id != 0x00) - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_DMG | - AMD_PG_SUPPORT_CP | - AMD_PG_SUPPORT_RLC_SMU_HS | - AMD_PG_SUPPORT_GFX_PIPELINE; adev->external_rev_id = adev->rev_id + 0x1; break; case CHIP_STONEY: @@ -1276,12 +1268,6 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; - adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_DMG | - AMD_PG_SUPPORT_GFX_PIPELINE | - AMD_PG_SUPPORT_CP | - AMD_PG_SUPPORT_RLC_SMU_HS; adev->external_rev_id = adev->rev_id + 0x1; break; default: diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 0c8c85d..f32af2f 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -160,6 +160,10 @@ struct cgs_firmware_info { uint16_t feature_version; uint32_t image_size; uint64_t mc_addr; + + /* only for smc firmware */ + uint32_t ucode_start_address; + void *kptr; }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index a6c9b42..d1b528b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -1828,7 +1828,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) { uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; + uint8_t i, stretch_amount, volt_offset = 0; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = @@ -1879,11 +1879,8 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6; /* Populate CKS Lookup Table */ - if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) - stretch_amount2 = 0; - else if (stretch_amount == 3 || stretch_amount == 4) - stretch_amount2 = 1; - else { + if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 && + stretch_amount != 4 && stretch_amount != 5) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher); PP_ASSERT_WITH_CODE(false, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index a64db7b..e2aece3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -179,13 +179,12 @@ int atomctrl_set_engine_dram_timings_rv770( /* They are both in 10KHz Units. 
*/ engine_clock_parameters.ulTargetEngineClock = - (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK; - engine_clock_parameters.ulTargetEngineClock |= - (COMPUTE_ENGINE_PLL_PARAM << 24); + cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) | + ((COMPUTE_ENGINE_PLL_PARAM << 24))); /* in 10 khz units.*/ engine_clock_parameters.sReserved.ulClock = - (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK; + cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); return cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), &engine_clock_parameters); @@ -252,7 +251,7 @@ int atomctrl_get_memory_pll_dividers_si( COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; int result; - mpll_parameters.ulClock = (uint32_t) clock_value; + mpll_parameters.ulClock = cpu_to_le32(clock_value); mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0); result = cgs_atom_exec_cmd_table @@ -262,9 +261,9 @@ int atomctrl_get_memory_pll_dividers_si( if (0 == result) { mpll_param->mpll_fb_divider.clk_frac = - mpll_parameters.ulFbDiv.usFbDivFrac; + le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac); mpll_param->mpll_fb_divider.cl_kf = - mpll_parameters.ulFbDiv.usFbDiv; + le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv); mpll_param->mpll_post_divider = (uint32_t)mpll_parameters.ucPostDiv; mpll_param->vco_mode = @@ -300,7 +299,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; int result; - mpll_parameters.ulClock.ulClock = (uint32_t)clock_value; + mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); result = cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), @@ -320,7 +319,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; int result; - pll_parameters.ulClock = clock_value; + pll_parameters.ulClock = cpu_to_le32(clock_value); result = cgs_atom_exec_cmd_table (hwmgr->device, @@ -329,7 +328,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, if (0 == result) { dividers->pll_post_divider = pll_parameters.ucPostDiv; - dividers->real_clock = pll_parameters.ulClock; + dividers->real_clock = le32_to_cpu(pll_parameters.ulClock); } return result; @@ -343,7 +342,7 @@ int atomctrl_get_engine_pll_dividers_vi( COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; int result; - pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; result = cgs_atom_exec_cmd_table @@ -355,12 +354,12 @@ int atomctrl_get_engine_pll_dividers_vi( dividers->pll_post_divider = pll_patameters.ulClock.ucPostDiv; dividers->real_clock = - pll_patameters.ulClock.ulClock; + le32_to_cpu(pll_patameters.ulClock.ulClock); dividers->ul_fb_div.ul_fb_div_frac = - pll_patameters.ulFbDiv.usFbDivFrac; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); dividers->ul_fb_div.ul_fb_div = - pll_patameters.ulFbDiv.usFbDiv; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); dividers->uc_pll_ref_div = pll_patameters.ucPllRefDiv; @@ -380,7 +379,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters; int result; - pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; result = cgs_atom_exec_cmd_table @@ -412,7 +411,7 @@ int 
atomctrl_get_dfs_pll_dividers_vi( COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; int result; - pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; @@ -425,12 +424,12 @@ int atomctrl_get_dfs_pll_dividers_vi( dividers->pll_post_divider = pll_patameters.ulClock.ucPostDiv; dividers->real_clock = - pll_patameters.ulClock.ulClock; + le32_to_cpu(pll_patameters.ulClock.ulClock); dividers->ul_fb_div.ul_fb_div_frac = - pll_patameters.ulFbDiv.usFbDivFrac; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); dividers->ul_fb_div.ul_fb_div = - pll_patameters.ulFbDiv.usFbDiv; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); dividers->uc_pll_ref_div = pll_patameters.ucPllRefDiv; @@ -519,13 +518,13 @@ int atomctrl_get_voltage_table_v3( for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) { voltage_table->entries[i].value = - voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue; + le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue); voltage_table->entries[i].smio_low = - voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId; + le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId); } voltage_table->mask_low = - voltage_object->asGpioVoltageObj.ulGpioMaskVal; + le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal); voltage_table->count = voltage_object->asGpioVoltageObj.ucGpioEntryNum; voltage_table->phase_delay = @@ -592,12 +591,12 @@ bool atomctrl_get_pp_assign_pin( const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) { - bool bRet = 0; + bool bRet = false; ATOM_GPIO_PIN_LUT *gpio_lookup_table = get_gpio_lookup_table(hwmgr->device); PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), - "Could not find GPIO lookup Table in BIOS.", return -1); + "Could not find GPIO lookup Table in BIOS.", return false); bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId, gpio_pin_assignment); @@ -650,8 +649,8 @@ int atomctrl_calculate_voltage_evv_on_sclk( return -1; if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || - (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && - getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) + (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && + getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) return -1; /*----------------------------------------------------------- @@ -662,37 +661,37 @@ int atomctrl_calculate_voltage_evv_on_sclk( switch (dpm_level) { case 1: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000); break; case 2: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000); break; case 3: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3)); + 
fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000); break; case 4: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000); break; case 5: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000); break; case 6: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000); break; case 7: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000); break; default: printk(KERN_ERR "DPM Level not supported\n"); fPowerDPMx = Convert_ULONG_ToFraction(1); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000); } /*------------------------- @@ -716,9 +715,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( return result; /* Finally, the actual fuse value */ - ul_RO_fused = sOutput_FuseValues.ulEfuseValue; - fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1); - fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1); + ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1); + fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1); fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); sCACm_fuse = getASICProfilingInfo->sCACm; @@ -736,9 +735,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_CACm_fused = sOutput_FuseValues.ulEfuseValue; - fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000); - fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000); + ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000); + fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000); fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); @@ -756,9 +755,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_CACb_fused = sOutput_FuseValues.ulEfuseValue; - fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000); - fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000); + ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000); + fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000); fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); @@ -777,9 
+776,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue; - fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000); - fRange = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000); + ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000); + fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000); fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused, fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); @@ -798,9 +797,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue; - fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000); - fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000); + ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000); + fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000); fRange = fMultiply(fRange, ConvertToFraction(-1)); fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, @@ -820,9 +819,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue; - fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000); - fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000); + ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000); + fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000); fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, fAverage, fRange, sKv_b_fuse.ucEfuseLength); @@ -851,9 +850,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue; - fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000); - fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000); + ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000); + fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000); fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); @@ -863,40 +862,40 @@ int atomctrl_calculate_voltage_evv_on_sclk( * PART 2 - Grabbing all required values *------------------------------------------- */ - fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000), + fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); - fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000), + fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); - fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000), + fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); - fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000), + fSM_A3 = 
fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); - fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 1000000), + fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); - fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000), + fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign))); - fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000), + fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign))); - fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000), + fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); - fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a); - fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b); - fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c); + fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a)); + fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b)); + fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c)); - fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed); + fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed)); fMargin_FMAX_mean = GetScaledFraction( - getASICProfilingInfo->ulMargin_Fmax_mean, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000); fMargin_Plat_mean = GetScaledFraction( - getASICProfilingInfo->ulMargin_plat_mean, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000); fMargin_FMAX_sigma = GetScaledFraction( - getASICProfilingInfo->ulMargin_Fmax_sigma, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000); fMargin_Plat_sigma = GetScaledFraction( - getASICProfilingInfo->ulMargin_plat_sigma, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000); fMargin_DC_sigma = GetScaledFraction( - getASICProfilingInfo->ulMargin_DC_sigma, 100); + le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100); fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); @@ -908,14 +907,14 @@ int atomctrl_calculate_voltage_evv_on_sclk( fSclk = GetScaledFraction(sclk, 100); fV_max = fDivide(GetScaledFraction( - getASICProfilingInfo->ulMaxVddc, 1000), ConvertToFraction(4)); - fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10); - fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100); - fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10); + le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4)); + fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10); + fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100); + fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10); fV_FT = fDivide(GetScaledFraction( - getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4)); + 
le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4)); fV_min = fDivide(GetScaledFraction( - getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4)); + le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4)); /*----------------------- * PART 3 @@ -925,7 +924,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); fC_Term = fAdd(fMargin_RO_c, - fAdd(fMultiply(fSM_A0,fLkg_FT), + fAdd(fMultiply(fSM_A0, fLkg_FT), fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)), fAdd(fMultiply(fSM_A3, fSclk), fSubtract(fSM_A7, fRO_fused))))); @@ -1063,9 +1062,55 @@ int atomctrl_get_voltage_evv_on_sclk( get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; get_voltage_info_param_space.usVoltageLevel = - virtual_voltage_Id; + cpu_to_le16(virtual_voltage_Id); get_voltage_info_param_space.ulSCLKFreq = - sclk; + cpu_to_le32(sclk); + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), + &get_voltage_info_param_space); + + if (0 != result) + return result; + + *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) + (&get_voltage_info_param_space))->usVoltageLevel); + + return result; +} + +/** + * atomctrl_get_voltage_evv gets voltage via call to ATOM COMMAND table. + * @param hwmgr input: pointer to hwManager + * @param virtual_voltage_id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08 + * @param voltage output: real voltage level in unit of mv + */ +int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, + uint16_t virtual_voltage_id, + uint16_t *voltage) +{ + int result; + int entry_id; + GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ + for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) { + /* found */ + break; + } + } + + PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count, + "Can't find requested voltage id in vddc_dependency_on_sclk table!", + return -EINVAL; + ); + + get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC; + get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; + get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id; + get_voltage_info_param_space.ulSCLKFreq = + cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk); result = cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), @@ -1074,8 +1119,8 @@ int atomctrl_get_voltage_evv_on_sclk( if (0 != result) return result; - *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) - (&get_voltage_info_param_space))->usVoltageLevel; + *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) + (&get_voltage_info_param_space))->usVoltageLevel); return result; } @@ -1165,8 +1210,8 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr, if (entry_found) { ssEntry->speed_spectrum_percentage = - ssInfo->usSpreadSpectrumPercentage; - ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz; + le16_to_cpu(ssInfo->usSpreadSpectrumPercentage); + ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz); if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) && 
(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
@@ -1222,7 +1267,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
int result;
READ_EFUSE_VALUE_PARAMETER efuse_param;
- efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4;
+ efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
efuse_param.sEfuse.ucBitShift = (uint8_t)
(start_index - ((start_index / 32) * 32));
efuse_param.sEfuse.ucBitLength = (uint8_t)
@@ -1232,19 +1277,21 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
&efuse_param);
if (!result)
- *efuse = efuse_param.ulEfuseValue & mask;
+ *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
return result;
}
int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
- uint8_t level)
+ uint8_t level)
{
DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
int result;
- memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK;
- memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM;
+ memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq =
+ cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
+ memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag =
+ cpu_to_le32(ADJUST_MC_SETTING_PARAM);
memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
result = cgs_atom_exec_cmd_table
@@ -1264,8 +1311,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
get_voltage_info_param_space.ucVoltageType = voltage_type;
get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
- get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id;
- get_voltage_info_param_space.ulSCLKFreq = sclk;
+ get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
+ get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
result = cgs_atom_exec_cmd_table(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1274,7 +1321,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
if (0 != result)
return result;
- *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
+ *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
return result;
}
@@ -1295,15 +1342,19 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
- table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc;
- table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper;
- table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower;
+ table->entry[i].usFcw_pcc =
+ le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
+ table->entry[i].usFcw_trans_upper =
+ le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
+ table->entry[i].usRcw_trans_lower =
+ le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
}
return 0;
}
-int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
+ struct pp_atom_ctrl__avfs_parameters *param)
{
ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
@@ -1317,30 +1368,30 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__a
if (!profile)
return -1;
- param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
- param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
- param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
- param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
- param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
- param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
- param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
- param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
- param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
- param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
- param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
- param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
- param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
- param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
- param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
- param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
- param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
- param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
- param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
+ param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
+ param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
+ param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
+ param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
+ param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
+ param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
+ param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
+ param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
+ param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
+ param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
+ param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
+ param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
+ param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
+ param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
- param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
+ param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
return 0;
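The ppatomctrl.c hunks above all follow one pattern: values read out of ATOM BIOS tables are converted with le16_to_cpu()/le32_to_cpu(), and values placed into command-table parameter blocks are converted with cpu_to_le16()/cpu_to_le32(), because the ATOM tables are always little-endian regardless of host endianness. A condensed sketch of that round trip, reusing only the structures and helpers visible in this diff (the function name below is illustrative and not part of the patch):

static int example_evv_query(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id,
                             uint32_t sclk, uint16_t *voltage)
{
        GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 param;
        int result;

        param.ucVoltageType = VOLTAGE_TYPE_VDDC;
        param.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
        /* inputs go to the BIOS in little-endian layout */
        param.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
        param.ulSCLKFreq = cpu_to_le32(sclk);

        result = cgs_atom_exec_cmd_table(hwmgr->device,
                        GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), &param);
        if (result)
                return result;

        /* outputs come back little-endian and are converted for the CPU */
        *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)&param)->usVoltageLevel);
        return 0;
}

On little-endian hosts these conversions compile to nothing; the point of the change is that the same code now also behaves correctly on big-endian hosts.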
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index 1e35a96..fc898af 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -281,6 +281,7 @@ struct pp_atom_ctrl__avfs_parameters {
extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+extern int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage);
extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo);
extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 35bc8a2..6c321b0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -810,6 +810,19 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
}
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+ uint32_t *vol_rep_time, uint32_t *bb_rep_time)
+{
+ const ATOM_PPLIB_POWERPLAYTABLE *powerplay_tab = get_powerplay_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE(NULL != powerplay_tab,
+ "Missing PowerPlay Table!", return -EINVAL);
+
+ *vol_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usVoltageTime);
+ *bb_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usBackbiasTime);
+
+ return 0;
+}
int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
unsigned long *num_of_entries)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
index 3043480..baddaa7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
@@ -32,16 +32,19 @@ struct pp_hw_power_state;
extern const struct pp_table_func pptable_funcs;
typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps,
- unsigned int index,
- const void *clock_info);
+ struct pp_hw_power_state *hw_ps,
+ unsigned int index,
+ const void *clock_info);
int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
- unsigned long *num_of_entries);
+ unsigned long *num_of_entries);
int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry_index,
- struct pp_power_state *ps,
- pp_tables_hw_clock_info_callback func);
+ unsigned long entry_index,
+ struct pp_power_state *ps,
+ pp_tables_hw_clock_info_callback func);
+
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+ uint32_t *vol_rep_time, uint32_t *bb_rep_time);
#endif
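For context, the new pp_tables_get_response_times() helper above simply pulls usVoltageTime and usBackbiasTime out of the PowerPlay table (converting from little-endian) and returns -EINVAL when no table is present. A hypothetical caller could look like the sketch below; everything except pp_tables_get_response_times() itself is illustrative and not part of this patch:

static int example_query_response_times(struct pp_hwmgr *hwmgr)
{
        uint32_t vol_rep_time, bb_rep_time;
        int ret;

        ret = pp_tables_get_response_times(hwmgr, &vol_rep_time, &bb_rep_time);
        if (ret)
                return ret;     /* -EINVAL: PowerPlay table missing */

        /* values are reported in the table's native units */
        pr_debug("voltage response time %u, backbias response time %u\n",
                 vol_rep_time, bb_rep_time);
        return 0;
}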
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index fc9e3d1..3c235f0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -131,6 +131,12 @@ extern int smu_free_memory(void *device, void *handle);
smum_wait_on_indirect_register(smumgr, \
mm##port##_INDEX, index, value, mask)
+#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
+ SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
+ SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+ SMUM_FIELD_MASK(reg, field) )
#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
index, value, mask) \
@@ -158,6 +164,10 @@ extern int smu_free_memory(void *device, void *handle);
(SMUM_FIELD_MASK(reg, field) & ((field_val) << \
SMUM_FIELD_SHIFT(reg, field))))
+#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \
+ SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field)
+
#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
port, index, value, mask) \
smum_wait_on_indirect_register(smumgr, \
@@ -191,6 +201,13 @@ extern int smu_free_memory(void *device, void *handle);
SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
reg, field, fieldval))
+
+#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
+ cgs_write_ind_register(device, port, ix##reg, \
+ SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field, fieldval))
+
+
#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \
(fieldval) << SMUM_FIELD_SHIFT(reg, field), \
@@ -200,4 +217,16 @@ extern int smu_free_memory(void *device, void *handle);
SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \
(fieldval) << SMUM_FIELD_SHIFT(reg, field), \
SMUM_FIELD_MASK(reg, field))
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \
+ smum_wait_for_indirect_register_unequal(smumgr, \
+ mm##port##_INDEX, index, value, mask)
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
+ SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
+ SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+ SMUM_FIELD_MASK(reg, field) )
+
#endif
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 587cae4..56bb758 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
else {