Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/dce_v8_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 84
1 file changed, 57 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a7decf9..5966166 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -397,15 +397,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			WREG32(mmDC_HPD1_CONTROL, tmp);
@@ -428,6 +419,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 		default:
 			break;
 		}
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+			switch (amdgpu_connector->hpd.hpd) {
+			case AMDGPU_HPD_1:
+				dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_2:
+				dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_3:
+				dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_4:
+				dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_5:
+				dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_6:
+				dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
+				break;
+			default:
+				continue;
+			}
+
+			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+			dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			continue;
+		}
+
 		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
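The hunk above moves the eDP/LVDS special case after the DC_HPDx_CONTROL programming and, instead of a bare continue, explicitly clears the pad's interrupt-enable bit with a read-modify-write of the matching DC_HPDx_INT_CONTROL register before skipping polarity setup and amdgpu_irq_get(). The snippet below is a minimal, self-contained sketch of that read-modify-write pattern only; the RREG32/WREG32 macros, the register offset, and the mask value are placeholder stand-ins for the real amdgpu MMIO helpers and CIK register headers, not the actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the amdgpu MMIO accessors and register definitions;
 * offsets and masks are illustrative only, not the real CIK values. */
static uint32_t fake_mmio[0x2000];
#define RREG32(reg)		(fake_mmio[(reg)])
#define WREG32(reg, v)		(fake_mmio[(reg)] = (v))
#define mmDC_HPD1_INT_CONTROL			0x1898		/* placeholder offset */
#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK	0x00010000	/* placeholder mask */

/* Clear only the interrupt-enable bit, leaving the other fields of the
 * register untouched, as the hunk does for eDP/LVDS connectors. */
static void hpd_int_disable(uint32_t reg)
{
	uint32_t v = RREG32(reg);

	v &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
	WREG32(reg, v);
}

int main(void)
{
	/* Pretend the pad came up with the interrupt enabled plus some
	 * unrelated bits set; only the enable bit should be cleared. */
	WREG32(mmDC_HPD1_INT_CONTROL,
	       DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK | 0x5);
	hpd_int_disable(mmDC_HPD1_INT_CONTROL);
	printf("DC_HPD1_INT_CONTROL = 0x%08x\n",
	       (unsigned int)RREG32(mmDC_HPD1_INT_CONTROL));
	return 0;
}

Only the masking shape carries over; in the driver the register chosen by the inner switch depends on which HPD pad the connector is wired to.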
@@ -1992,7 +2022,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
@@ -2020,23 +2050,23 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
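The rename from rbo to abo in this and the following hunks is mechanical, but the hunk also shows the buffer-object discipline the scanout path relies on: reserve the BO, then either read its GPU offset (the atomic case, where the framebuffer is already pinned) or pin it into VRAM, read the tiling flags, and unreserve, making sure the pin-failure path unreserves before returning. A rough stand-alone model of that ordering, using toy stand-ins rather than the real amdgpu_bo_* API, might look like this.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the amdgpu_bo_* helpers used above; they only model
 * success/failure and a fake GPU address, not real TTM reservation. */
struct toy_bo { int reserved; int pinned; uint64_t gpu_addr; };

static int  toy_bo_reserve(struct toy_bo *bo)   { bo->reserved = 1; return 0; }
static void toy_bo_unreserve(struct toy_bo *bo) { bo->reserved = 0; }
static int  toy_bo_pin(struct toy_bo *bo, uint64_t *addr)
{
	bo->pinned = 1;
	bo->gpu_addr = 0x100000;	/* pretend VRAM placement */
	*addr = bo->gpu_addr;
	return 0;
}

/* Same ordering as the do_set_base hunk: reserve, pin (unless atomic),
 * read back state, unreserve; unreserve on the error path as well. */
static int toy_set_base(struct toy_bo *bo, int atomic, uint64_t *fb_location)
{
	int r = toy_bo_reserve(bo);

	if (r)
		return r;
	if (atomic) {
		*fb_location = bo->gpu_addr;
	} else {
		r = toy_bo_pin(bo, fb_location);
		if (r) {
			toy_bo_unreserve(bo);
			return -1;
		}
	}
	toy_bo_unreserve(bo);
	return 0;
}

int main(void)
{
	struct toy_bo bo = { 0 };
	uint64_t loc = 0;

	if (!toy_set_base(&bo, 0, &loc))
		printf("fb pinned at 0x%llx\n", (unsigned long long)loc);
	return 0;
}

In the real driver the reserve step is what makes the pin/unpin and tiling-flag accesses safe against concurrent movement of the buffer, which is also why the crtc_disable hunk further down reserves the BO before unpinning it.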
@@ -2192,12 +2222,12 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2669,16 +2699,16 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */