Diffstat (limited to 'drivers/gpu')
81 files changed, 946 insertions, 819 deletions
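Four of the amdgpu hunks below (dce_v6_0, dce_v8_0, dce_v10_0, dce_v11_0) apply the same cursor fix: mmCUR_SIZE is reprogrammed on every locked cursor move from the cached dimensions, instead of only when crtc_cursor_set2() notices a size change. What follows is a condensed, illustrative sketch of the resulting move path using the dce_v10_0 names from the diff; it assumes the driver's surrounding context (WREG32, amdgpu_crtc fields), and the x/y clamping and xorigin/yorigin derivation are elided.

static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;

	/* ... clamp x/y against the CRTC and derive xorigin/yorigin ... */

	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset,
	       (xorigin << 16) | yorigin);
	/* size is now rewritten on every move from the cached dimensions */
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((amdgpu_crtc->cursor_width - 1) << 16) |
	       (amdgpu_crtc->cursor_height - 1));
	return 0;
}

With the size write centralized here, crtc_cursor_set2() and cursor_reset() can drop their duplicated mmCUR_SIZE writes and now-unused adev locals, which is exactly what the corresponding hunks do.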
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 29d6d84..41e41f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } break; } + + if (!(*out_ring && (*out_ring)->adev)) { + DRM_ERROR("Ring %d is not initialized on IP %d\n", + ring, ip_type); + return -EINVAL; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 9999dc7..ccb5e02 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc, WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); return 0; } @@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; struct drm_gem_object *obj; struct amdgpu_bo *aobj; int ret; @@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v10_0_lock_cursor(crtc, true); - if (hot_x != amdgpu_crtc->cursor_hot_x || + if (width != amdgpu_crtc->cursor_width || + height != amdgpu_crtc->cursor_height || + hot_x != amdgpu_crtc->cursor_hot_x || hot_y != amdgpu_crtc->cursor_hot_y) { int x, y; @@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v10_0_cursor_move_locked(crtc, x, y); - amdgpu_crtc->cursor_hot_x = hot_x; - amdgpu_crtc->cursor_hot_y = hot_y; - } - - if (width != amdgpu_crtc->cursor_width || - height != amdgpu_crtc->cursor_height) { - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (width - 1) << 16 | (height - 1)); amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_height = height; + amdgpu_crtc->cursor_hot_x = hot_x; + amdgpu_crtc->cursor_hot_y = hot_y; } dce_v10_0_show_cursor(crtc); @@ -2620,7 +2617,6 @@ unpin: static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; if (amdgpu_crtc->cursor_bo) { dce_v10_0_lock_cursor(crtc, true); @@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, amdgpu_crtc->cursor_y); - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->cursor_width - 1) << 16 | - (amdgpu_crtc->cursor_height - 1)); - dce_v10_0_show_cursor(crtc); dce_v10_0_lock_cursor(crtc, false); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 2006abb..a7af5b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2532,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc, WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); return 0; } @@ -2557,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y) { struct amdgpu_crtc *amdgpu_crtc = 
to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; struct drm_gem_object *obj; struct amdgpu_bo *aobj; int ret; @@ -2598,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v11_0_lock_cursor(crtc, true); - if (hot_x != amdgpu_crtc->cursor_hot_x || + if (width != amdgpu_crtc->cursor_width || + height != amdgpu_crtc->cursor_height || + hot_x != amdgpu_crtc->cursor_hot_x || hot_y != amdgpu_crtc->cursor_hot_y) { int x, y; @@ -2607,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v11_0_cursor_move_locked(crtc, x, y); - amdgpu_crtc->cursor_hot_x = hot_x; - amdgpu_crtc->cursor_hot_y = hot_y; - } - - if (width != amdgpu_crtc->cursor_width || - height != amdgpu_crtc->cursor_height) { - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (width - 1) << 16 | (height - 1)); amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_height = height; + amdgpu_crtc->cursor_hot_x = hot_x; + amdgpu_crtc->cursor_hot_y = hot_y; } dce_v11_0_show_cursor(crtc); @@ -2640,7 +2637,6 @@ unpin: static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; if (amdgpu_crtc->cursor_bo) { dce_v11_0_lock_cursor(crtc, true); @@ -2648,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, amdgpu_crtc->cursor_y); - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->cursor_width - 1) << 16 | - (amdgpu_crtc->cursor_height - 1)); - dce_v11_0_show_cursor(crtc); dce_v11_0_lock_cursor(crtc, false); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index b4e4ec6..39df6a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc, struct amdgpu_device *adev = crtc->dev->dev_private; int xorigin = 0, yorigin = 0; + int w = amdgpu_crtc->cursor_width; + amdgpu_crtc->cursor_x = x; amdgpu_crtc->cursor_y = y; @@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc, WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); return 0; } @@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; struct drm_gem_object *obj; struct amdgpu_bo *aobj; int ret; @@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v6_0_lock_cursor(crtc, true); - if (hot_x != amdgpu_crtc->cursor_hot_x || + if (width != amdgpu_crtc->cursor_width || + height != amdgpu_crtc->cursor_height || + hot_x != amdgpu_crtc->cursor_hot_x || hot_y != amdgpu_crtc->cursor_hot_y) { int x, y; @@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v6_0_cursor_move_locked(crtc, x, y); - amdgpu_crtc->cursor_hot_x = hot_x; - amdgpu_crtc->cursor_hot_y = hot_y; - } - - if (width != amdgpu_crtc->cursor_width || - height != amdgpu_crtc->cursor_height) { - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (width - 1) << 16 | (height - 1)); amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_height = height; + amdgpu_crtc->cursor_hot_x = hot_x; + 
amdgpu_crtc->cursor_hot_y = hot_y; } dce_v6_0_show_cursor(crtc); @@ -1986,7 +1985,6 @@ unpin: static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; if (amdgpu_crtc->cursor_bo) { dce_v6_0_lock_cursor(crtc, true); @@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, amdgpu_crtc->cursor_y); - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->cursor_width - 1) << 16 | - (amdgpu_crtc->cursor_height - 1)); - dce_v6_0_show_cursor(crtc); dce_v6_0_lock_cursor(crtc, false); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 584abe8..28102bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc, WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); return 0; } @@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; struct drm_gem_object *obj; struct amdgpu_bo *aobj; int ret; @@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v8_0_lock_cursor(crtc, true); - if (hot_x != amdgpu_crtc->cursor_hot_x || + if (width != amdgpu_crtc->cursor_width || + height != amdgpu_crtc->cursor_height || + hot_x != amdgpu_crtc->cursor_hot_x || hot_y != amdgpu_crtc->cursor_hot_y) { int x, y; @@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v8_0_cursor_move_locked(crtc, x, y); - amdgpu_crtc->cursor_hot_x = hot_x; - amdgpu_crtc->cursor_hot_y = hot_y; - } - - if (width != amdgpu_crtc->cursor_width || - height != amdgpu_crtc->cursor_height) { - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (width - 1) << 16 | (height - 1)); amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_height = height; + amdgpu_crtc->cursor_hot_x = hot_x; + amdgpu_crtc->cursor_hot_y = hot_y; } dce_v8_0_show_cursor(crtc); @@ -2471,7 +2468,6 @@ unpin: static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; if (amdgpu_crtc->cursor_bo) { dce_v8_0_lock_cursor(crtc, true); @@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, amdgpu_crtc->cursor_y); - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->cursor_width - 1) << 16 | - (amdgpu_crtc->cursor_height - 1)); - dce_v8_0_show_cursor(crtc); dce_v8_0_lock_cursor(crtc, false); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 762f8e8..e9a1768 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) { - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - - kfree(amdgpu_encoder->enc_priv); 
drm_encoder_cleanup(encoder); - kfree(amdgpu_encoder); + kfree(encoder); } static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 45a573e..0635829 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin"); MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); MODULE_FIRMWARE("radeon/verde_mc.bin"); MODULE_FIRMWARE("radeon/oland_mc.bin"); +MODULE_FIRMWARE("radeon/si58_mc.bin"); #define MC_SEQ_MISC0__MT__MASK 0xf0000000 #define MC_SEQ_MISC0__MT__GDDR1 0x10000000 @@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) const char *chip_name; char fw_name[30]; int err; + bool is_58_fw = false; DRM_DEBUG("\n"); @@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + /* this memory configuration requires special firmware */ + if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) + is_58_fw = true; + + if (is_58_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); err = request_firmware(&adev->mc.fw, fw_name, adev->dev); if (err) goto out; @@ -245,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); + if (adev->mode_info.num_crtc) + amdgpu_display_set_vga_render_state(adev, false); + gmc_v6_0_mc_stop(adev, &save); if (gmc_v6_0_wait_for_idle((void *)adev)) { @@ -274,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v6_0_mc_resume(adev, &save); - amdgpu_display_set_vga_render_state(adev, false); } static int gmc_v6_0_mc_init(struct amdgpu_device *adev) @@ -463,19 +474,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) WREG32(mmVM_CONTEXT1_CNTL, VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | - ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) | - VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK); + ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT)); + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) + gmc_v6_0_set_fault_enable_default(adev, false); + else + gmc_v6_0_set_fault_enable_default(adev, true); gmc_v6_0_gart_flush_gpu_tlb(adev, 0); dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", @@ -754,7 +757,10 @@ static int gmc_v6_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - return amdgpu_irq_get(adev, 
&adev->mc.vm_fault, 0); + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); + else + return 0; } static int gmc_v6_0_sw_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 10bedfa..6e150db 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin"); MODULE_FIRMWARE("radeon/oland_k_smc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); +MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); union power_info { struct _ATOM_POWERPLAY_INFO info; @@ -3487,17 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, (adev->pdev->device == 0x6817) || (adev->pdev->device == 0x6806)) max_mclk = 120000; - } else if (adev->asic_type == CHIP_OLAND) { - if ((adev->pdev->revision == 0xC7) || - (adev->pdev->revision == 0x80) || - (adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6604) || - (adev->pdev->device == 0x6605)) { - max_sclk = 75000; - max_mclk = 80000; - } } else if (adev->asic_type == CHIP_HAINAN) { if ((adev->pdev->revision == 0x81) || (adev->pdev->revision == 0x83) || @@ -3506,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, (adev->pdev->device == 0x6665) || (adev->pdev->device == 0x6667)) { max_sclk = 75000; - max_mclk = 80000; } } /* Apply dpm quirks */ @@ -7713,10 +7702,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) ((adev->pdev->device == 0x6660) || (adev->pdev->device == 0x6663) || (adev->pdev->device == 0x6665) || - (adev->pdev->device == 0x6667))) || - ((adev->pdev->revision == 0xc3) && - (adev->pdev->device == 0x6665))) + (adev->pdev->device == 0x6667)))) chip_name = "hainan_k"; + else if ((adev->pdev->revision == 0xc3) && + (adev->pdev->device == 0x6665)) + chip_name = "banks_k_2"; else chip_name = "hainan"; break; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 96444e4..7fb9137 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -40,13 +40,14 @@ #include "smu/smu_7_0_1_sh_mask.h" static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); -static void uvd_v4_2_init_cg(struct amdgpu_device *adev); static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); static int uvd_v4_2_start(struct amdgpu_device *adev); static void uvd_v4_2_stop(struct amdgpu_device *adev); static int uvd_v4_2_set_clockgating_state(void *handle, enum amd_clockgating_state state); +static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, + bool sw_mode); /** * uvd_v4_2_ring_get_rptr - get read pointer * @@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle) return r; } - +static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, + bool enable); /** * uvd_v4_2_hw_init - start and test UVD block * @@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle) uint32_t tmp; int r; - uvd_v4_2_init_cg(adev); - uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE); + uvd_v4_2_enable_mgcg(adev, true); amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); r = uvd_v4_2_start(adev); if (r) @@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->uvd.ring; uint32_t rb_bufsz; int i, j, r; - /* disable byte 
swapping */ u32 lmi_swap_cntl = 0; u32 mp_swap_cntl = 0; + WREG32(mmUVD_CGC_GATE, 0); + uvd_v4_2_set_dcm(adev, true); + uvd_v4_2_mc_resume(adev); /* disable interupt */ @@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev) /* Unstall UMC and register bus */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + + uvd_v4_2_set_dcm(adev, false); } /** @@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); } -static void uvd_v4_2_init_cg(struct amdgpu_device *adev) -{ - bool hw_mode = true; - - if (hw_mode) { - uvd_v4_2_set_dcm(adev, false); - } else { - u32 tmp = RREG32(mmUVD_CGC_CTRL); - tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; - WREG32(mmUVD_CGC_CTRL, tmp); - } -} - static bool uvd_v4_2_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, static int uvd_v4_2_set_clockgating_state(void *handle, enum amd_clockgating_state state) { - bool gate = false; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - - if (state == AMD_CG_STATE_GATE) - gate = true; - - uvd_v4_2_enable_mgcg(adev, gate); - return 0; } @@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) - return 0; - if (state == AMD_PG_STATE_GATE) { uvd_v4_2_stop(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 5fb0b7f..37ca685 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -43,9 +43,13 @@ #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 +#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07 + #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 +#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000 + #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 #define VCE_V3_0_FW_SIZE (384 * 1024) @@ -54,6 +58,9 @@ #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) +#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \ + | GRBM_GFX_INDEX__VCE_ALL_PIPE) + static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); @@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, WREG32(mmVCE_UENC_CLOCK_GATING_2, data); data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); - data &= ~0xffc00000; + data &= ~0x3ff; WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); @@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); vce_v3_0_mc_resume(adev, idx); WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); @@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) } } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - 
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); @@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) vce_v3_0_set_vce_sw_clock_gating(adev, false); } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle) * VCE team suggest use bit 3--bit 6 for busy status check */ mutex_lock(&adev->grbm_idx_mutex); - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); mutex_unlock(&adev->grbm_idx_mutex); if (srbm_soft_reset) { @@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, if (adev->vce.harvest_config & (1 << i)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i)); if (enable) { /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ @@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, vce_v3_0_set_vce_sw_clock_gating(adev, enable); } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index b0c63c5..6bb79c9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); + AMD_CG_STATE_GATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, @@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 4b14f25..0fb4e8c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) cz_hwmgr->vce_dpm.hard_min_clk, PPSMC_MSG_SetEclkHardMin)); } else { - /*EPR# 419220 -HW limitation to to */ - cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetEclkHardMin, - cz_get_eclk_level(hwmgr, - cz_hwmgr->vce_dpm.hard_min_clk, - PPSMC_MSG_SetEclkHardMin)); - + /*Program HardMin based on the vce_arbiter.ecclk 
*/ + if (hwmgr->vce_arbiter.ecclk == 0) { + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, 0); + /* disable ECLK DPM 0. Otherwise VCE could hang if + * switching SCLK from DPM 0 to 6/7 */ + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkSoftMin, 1); + } else { + cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, + cz_get_eclk_level(hwmgr, + cz_hwmgr->vce_dpm.hard_min_clk, + PPSMC_MSG_SetEclkHardMin)); + } } return 0; } diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 908011d..7abda94 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -113,6 +113,7 @@ struct ast_private { struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; + bool DisableP2A; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f75c642..533e762 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } else *need_post = false; + /* Check P2A Access */ + ast->DisableP2A = true; + data = ast_read32(ast, 0xf004); + if (data != 0xFFFFFFFF) + ast->DisableP2A = false; + /* Check if we support wide screen */ switch (ast->chip) { case AST1180: @@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x1207c); - data &= 0x300; - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ - ast->support_wide_screen = true; - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ - ast->support_wide_screen = true; + if (ast->DisableP2A == false) { + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + data = ast_read32(ast, 0x1207c); + data &= 0x300; + if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ + ast->support_wide_screen = true; + } } break; } @@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev) uint32_t data, data2; uint32_t denum, num, div, ref_pll; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - - - ast_write32(ast, 0x10000, 0xfc600309); - - do { - if (pci_channel_offline(dev->pdev)) - return -EIO; - } while (ast_read32(ast, 0x10000) != 0x01); - data = ast_read32(ast, 0x10004); - - if (data & 0x40) + if (ast->DisableP2A) + { ast->dram_bus_width = 16; + ast->dram_type = AST_DRAM_1Gx16; + ast->mclk = 396; + } else - ast->dram_bus_width = 32; + { + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + data = ast_read32(ast, 0x10004); + + if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; + + if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (data & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; + break; + default: + case 1: + ast->dram_type = AST_DRAM_1Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (data & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if 
(data & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; + break; + } + } - if (ast->chip == AST2300 || ast->chip == AST2400) { - switch (data & 0x03) { - case 0: - ast->dram_type = AST_DRAM_512Mx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_1Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_2Gx16; - break; + data = ast_read32(ast, 0x10120); + data2 = ast_read32(ast, 0x10170); + if (data2 & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = data & 0x1f; + num = (data & 0x3fe0) >> 5; + data = (data & 0xc000) >> 14; + switch (data) { case 3: - ast->dram_type = AST_DRAM_4Gx16; - break; - } - } else { - switch (data & 0x0c) { - case 0: - case 4: - ast->dram_type = AST_DRAM_512Mx16; + div = 0x4; break; - case 8: - if (data & 0x40) - ast->dram_type = AST_DRAM_1Gx16; - else - ast->dram_type = AST_DRAM_512Mx32; + case 2: + case 1: + div = 0x2; break; - case 0xc: - ast->dram_type = AST_DRAM_1Gx32; + default: + div = 0x1; break; } + ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); } - - data = ast_read32(ast, 0x10120); - data2 = ast_read32(ast, 0x10170); - if (data2 & 0x2000) - ref_pll = 14318; - else - ref_pll = 12000; - - denum = data & 0x1f; - num = (data & 0x3fe0) >> 5; - data = (data & 0xc000) >> 14; - switch (data) { - case 3: - div = 0x4; - break; - case 2: - case 1: - div = 0x2; - break; - default: - div = 0x1; - break; - } - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 810c51d..5331ee1 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev) ast_open_key(ast); ast_set_def_ext_reg(dev); - if (ast->chip == AST2300 || ast->chip == AST2400) - ast_init_dram_2300(dev); - else - ast_init_dram_reg(dev); + if (ast->DisableP2A == false) + { + if (ast->chip == AST2300 || ast->chip == AST2400) + ast_init_dram_2300(dev); + else + ast_init_dram_reg(dev); - ast_init_3rdtx(dev); + ast_init_3rdtx(dev); + } + else + { + if (ast->tx_chip_type != AST_TX_NONE) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ + } } /* AST 2300 DRAM settings */ diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index eb9bf87..18eefdc 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, pm_runtime_enable(dev); + pm_runtime_get_sync(dev); phy_power_on(dp->phy); analogix_dp_init_dp(dp); @@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, goto err_disable_pm_runtime; } + phy_power_off(dp->phy); + pm_runtime_put(dev); + return 0; err_disable_pm_runtime: + + phy_power_off(dp->phy); + pm_runtime_put(dev); pm_runtime_disable(dev); return ret; diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig index 04b3c16..7f4cc6e 100644 --- a/drivers/gpu/drm/cirrus/Kconfig +++ b/drivers/gpu/drm/cirrus/Kconfig @@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU This is a KMS driver for emulated cirrus device in qemu. It is *NOT* intended for real cirrus devices. This requires the modesetting userspace X.org driver. 
+ + Cirrus is obsolete, the hardware was designed in the 90ies + and can't keep up with todays needs. More background: + https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/ + + Better alternatives are: + - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+) + - qxl (DRM_QXL, qemu -vga qxl, works best with spice) + - virtio (DRM_VIRTIO_GPU), qemu -vga virtio) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 6069748..fdfb1ec 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, EXPORT_SYMBOL(drm_atomic_get_crtc_state); static void set_out_fence_for_crtc(struct drm_atomic_state *state, - struct drm_crtc *crtc, s64 __user *fence_ptr) + struct drm_crtc *crtc, s32 __user *fence_ptr) { state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; } -static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, +static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc) { - s64 __user *fence_ptr; + s32 __user *fence_ptr; fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; @@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, state->color_mgmt_changed |= replaced; return ret; } else if (property == config->prop_out_fence_ptr) { - s64 __user *fence_ptr = u64_to_user_ptr(val); + s32 __user *fence_ptr = u64_to_user_ptr(val); if (!fence_ptr) return 0; @@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb); */ struct drm_out_fence_state { - s64 __user *out_fence_ptr; + s32 __user *out_fence_ptr; struct sync_file *sync_file; int fd; }; @@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev, return 0; for_each_crtc_in_state(state, crtc, crtc_state, i) { - u64 __user *fence_ptr; + s32 __user *fence_ptr; fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); @@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev, } for_each_crtc_in_state(state, crtc, crtc_state, i) { + struct drm_pending_vblank_event *event = crtc_state->event; /* - * TEST_ONLY and PAGE_FLIP_EVENT are mutually - * exclusive, if they weren't, this code should be - * called on success for TEST_ONLY too. + * Free the allocated event. drm_atomic_helper_setup_commit + * can allocate an event too, so only free it if it's ours + * to prevent a double free in drm_atomic_state_clear. 
*/ - if (crtc_state->event) - drm_event_cancel_free(dev, &crtc_state->event->base); + if (event && (event->base.fence || event->base.file_priv)) { + drm_event_cancel_free(dev, &event->base); + crtc_state->event = NULL; + } } if (!fence_state) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 34f757b..4594477 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, funcs = plane->helper_private; - if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc)) - continue; - if (funcs->prepare_fb) { ret = funcs->prepare_fb(plane, plane_state); if (ret) @@ -1685,9 +1682,6 @@ fail: if (j >= i) continue; - if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc)) - continue; - funcs = plane->helper_private; if (funcs->cleanup_fb) @@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev, for_each_plane_in_state(old_state, plane, plane_state, i) { const struct drm_plane_helper_funcs *funcs; - if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc)) - continue; - funcs = plane->helper_private; if (funcs->cleanup_fb) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 5a45262..7a7019a 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev, INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); + mutex_init(&connector->mutex); connector->edid_blob_ptr = NULL; connector->status = connector_status_unknown; @@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector) connector->funcs->atomic_destroy_state(connector, connector->state); + mutex_destroy(&connector->mutex); + memset(connector, 0, sizeof(*connector)); } EXPORT_SYMBOL(drm_connector_cleanup); @@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup); */ int drm_connector_register(struct drm_connector *connector) { - int ret; + int ret = 0; - if (connector->registered) + if (!connector->dev->registered) return 0; + mutex_lock(&connector->mutex); + if (connector->registered) + goto unlock; + ret = drm_sysfs_connector_add(connector); if (ret) - return ret; + goto unlock; ret = drm_debugfs_connector_add(connector); if (ret) { @@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector) drm_mode_object_register(connector->dev, &connector->base); connector->registered = true; - return 0; + goto unlock; err_debugfs: drm_debugfs_connector_remove(connector); err_sysfs: drm_sysfs_connector_remove(connector); +unlock: + mutex_unlock(&connector->mutex); return ret; } EXPORT_SYMBOL(drm_connector_register); @@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register); */ void drm_connector_unregister(struct drm_connector *connector) { - if (!connector->registered) + mutex_lock(&connector->mutex); + if (!connector->registered) { + mutex_unlock(&connector->mutex); return; + } if (connector->funcs->early_unregister) connector->funcs->early_unregister(connector); @@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector) drm_debugfs_connector_remove(connector); connector->registered = false; + mutex_unlock(&connector->mutex); } EXPORT_SYMBOL(drm_connector_unregister); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index a525751..6594b40 100644 --- a/drivers/gpu/drm/drm_drv.c +++ 
b/drivers/gpu/drm/drm_drv.c @@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) if (ret) goto err_minors; + dev->registered = true; + if (dev->driver->load) { ret = dev->driver->load(dev, flags); if (ret) @@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev) drm_lastclose(dev); + dev->registered = false; + if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_modeset_unregister_all(dev); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index ac6a352..e6b19bc 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev, return NULL; mode->type |= DRM_MODE_TYPE_USERDEF; + /* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */ + if (cmd->xres == 1366 && mode->hdisplay == 1368) { + mode->hdisplay = 1366; + mode->hsync_start--; + mode->hsync_end--; + drm_mode_set_name(mode); + } drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); return mode; } diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index ac953f0..cf8f012 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev) } if (dev->mode_config.delayed_event) { + /* + * FIXME: + * + * Use short (1s) delay to handle the initial delayed event. + * This delay should not be needed, but Optimus/nouveau will + * fail in a mysterious way if the delayed event is handled as + * soon as possible like it is done in + * drm_helper_probe_single_connector_modes() in case the poll + * was enabled before. + */ poll = true; - delay = 0; + delay = HZ; } if (poll) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 169ac96..fe0e85b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, struct list_head list; bool found; + /* + * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick + * drm_mm into giving out a low IOVA after address space + * rollover. This needs a proper fix. + */ ret = drm_mm_insert_node_in_range(&mmu->mm, node, size, 0, mmu->last_iova, ~0UL, - DRM_MM_SEARCH_DEFAULT); + mmu->last_iova ? 
DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW); if (ret != -ENOSPC) break; diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 6ca1f31..75eeb83 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -46,7 +46,8 @@ enum decon_flag_bits { BIT_CLKS_ENABLED, BIT_IRQS_ENABLED, BIT_WIN_UPDATED, - BIT_SUSPENDED + BIT_SUSPENDED, + BIT_REQUEST_UPDATE }; struct decon_context { @@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc) m->crtc_vsync_end = m->crtc_vsync_start + 1; } - decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0); - - /* enable clock gate */ - val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F; - writel(val, ctx->addr + DECON_CMU); - if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) decon_setup_trigger(ctx); @@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, /* window enable */ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); } static void decon_disable_plane(struct exynos_drm_crtc *crtc, @@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, return; decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); } static void decon_atomic_flush(struct exynos_drm_crtc *crtc) @@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) for (i = ctx->first_win; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, false); - /* standalone update */ - decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); + if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags)) + decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); if (ctx->out_type & IFTYPE_I80) set_bit(BIT_WIN_UPDATED, &ctx->flags); diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 0d41ebc..f7bce86 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -37,13 +37,6 @@ #include "i915_drv.h" #include "gvt.h" -#define MB_TO_BYTES(mb) ((mb) << 20ULL) -#define BYTES_TO_MB(b) ((b) >> 20ULL) - -#define HOST_LOW_GM_SIZE MB_TO_BYTES(128) -#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) -#define HOST_FENCE 4 - static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) { struct intel_gvt *gvt = vgpu->gvt; @@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, POSTING_READ(fence_reg_lo); } +static void _clear_vgpu_fence(struct intel_vgpu *vgpu) +{ + int i; + + for (i = 0; i < vgpu_fence_sz(vgpu); i++) + intel_vgpu_write_fence(vgpu, i, 0); +} + static void free_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; @@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); + _clear_vgpu_fence(vgpu); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = vgpu->fence.regs[i]; - intel_vgpu_write_fence(vgpu, i, 0); list_add_tail(®->link, &dev_priv->mm.fence_list); } @@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) continue; list_del(pos); vgpu->fence.regs[i] = reg; - intel_vgpu_write_fence(vgpu, i, 0); if (++i == vgpu_fence_sz(vgpu)) break; } if (i != vgpu_fence_sz(vgpu)) goto out_free_fence; + _clear_vgpu_fence(vgpu); + mutex_unlock(&dev_priv->drm.struct_mutex); intel_runtime_pm_put(dev_priv); return 0; @@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu) } /** + * 
intel_vgpu_reset_resource - reset resource state owned by a vGPU + * @vgpu: a vGPU + * + * This function is used to reset resource state owned by a vGPU. + * + */ +void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + intel_runtime_pm_get(dev_priv); + _clear_vgpu_fence(vgpu); + intel_runtime_pm_put(dev_priv); +} + +/** * intel_alloc_vgpu_resource - allocate HW resource for a vGPU * @vgpu: vGPU * @param: vGPU creation params diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 711c31c..4a6a2ed 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, } return 0; } + +/** + * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU + * + * @vgpu: a vGPU + * @primary: is the vGPU presented as primary + * + */ +void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, + bool primary) +{ + struct intel_gvt *gvt = vgpu->gvt; + const struct intel_gvt_device_info *info = &gvt->device_info; + u16 *gmch_ctl; + int i; + + memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, + info->cfg_space_size); + + if (!primary) { + vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = + INTEL_GVT_PCI_CLASS_VGA_OTHER; + vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = + INTEL_GVT_PCI_CLASS_VGA_OTHER; + } + + /* Show guest that there isn't any stolen memory.*/ + gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); + *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); + + intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, + gvt_aperture_pa_base(gvt), true); + + vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO + | PCI_COMMAND_MEMORY + | PCI_COMMAND_MASTER); + /* + * Clear the bar upper 32bit and let guest to assign the new value + */ + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); + memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); + + for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { + vgpu->cfg_space.bar[i].size = pci_resource_len( + gvt->dev_priv->drm.pdev, i * 2); + vgpu->cfg_space.bar[i].tracked = false; + } +} + +/** + * intel_vgpu_reset_cfg_space - reset vGPU configuration space + * + * @vgpu: a vGPU + * + */ +void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) +{ + u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND]; + bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] != + INTEL_GVT_PCI_CLASS_VGA_OTHER; + + if (cmd & PCI_COMMAND_MEMORY) { + trap_gttmmio(vgpu, false); + map_aperture(vgpu, false); + } + + /** + * Currently we only do such reset when vGPU is not + * owned by any VM, so we simply restore entire cfg + * space to default value. 
+ */ + intel_vgpu_init_cfg_space(vgpu, primary); +} diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d26a092..e456398 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -481,7 +481,6 @@ struct parser_exec_state { (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) static unsigned long bypass_scan_mask = 0; -static bool bypass_batch_buffer_scan = true; /* ring ALL, type = 0 */ static struct sub_op_bits sub_op_mi[] = { @@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) { struct intel_gvt *gvt = s->vgpu->gvt; - if (bypass_batch_buffer_scan) - return 0; - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { /* BDW decides privilege based on address space */ if (cmd_val(s, 0) & (1 << 8)) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f32bb6f..3408373 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload) #define get_desc_from_elsp_dwords(ed, i) \ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) - -#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2)) -#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U)) -static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj, - unsigned long add, int gmadr_bytes) -{ - if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) - return -1; - - *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add & - BATCH_BUFFER_ADDR_MASK; - if (gmadr_bytes == 8) { - *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) = - add & BATCH_BUFFER_ADDR_HIGH_MASK; - } - - return 0; -} - static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) { - int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; + const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; + struct intel_shadow_bb_entry *entry_obj; /* pin the gem object to ggtt */ - if (!list_empty(&workload->shadow_bb)) { - struct intel_shadow_bb_entry *entry_obj = - list_first_entry(&workload->shadow_bb, - struct intel_shadow_bb_entry, - list); - struct intel_shadow_bb_entry *temp; + list_for_each_entry(entry_obj, &workload->shadow_bb, list) { + struct i915_vma *vma; - list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, - list) { - struct i915_vma *vma; - - vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, - 4, 0); - if (IS_ERR(vma)) { - gvt_err("Cannot pin\n"); - return; - } - - /* FIXME: we are not tracking our pinned VMA leaving it - * up to the core to fix up the stray pin_count upon - * free. - */ - - /* update the relocate gma with shadow batch buffer*/ - set_gma_to_bb_cmd(entry_obj, - i915_ggtt_offset(vma), - gmadr_bytes); + vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); + if (IS_ERR(vma)) { + gvt_err("Cannot pin\n"); + return; } + + /* FIXME: we are not tracking our pinned VMA leaving it + * up to the core to fix up the stray pin_count upon + * free. 
+ */ + + /* update the relocate gma with shadow batch buffer*/ + entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma); + if (gmadr_bytes == 8) + entry_obj->bb_start_cmd_va[2] = 0; } } @@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) INIT_LIST_HEAD(&vgpu->workload_q_head[i]); } - vgpu->workloads = kmem_cache_create("gvt-g vgpu workload", + vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload", sizeof(struct intel_vgpu_workload), 0, SLAB_HWCACHE_ALIGN, NULL); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6c5fdf5..47dec4a 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -240,15 +240,8 @@ static inline int get_pse_type(int type) static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) { void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; - u64 pte; -#ifdef readq - pte = readq(addr); -#else - pte = ioread32(addr); - pte |= (u64)ioread32(addr + 4) << 32; -#endif - return pte; + return readq(addr); } static void write_pte64(struct drm_i915_private *dev_priv, @@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv, { void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; -#ifdef writeq writeq(pte, addr); -#else - iowrite32((u32)pte, addr); - iowrite32(pte >> 32, addr + 4); -#endif + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); POSTING_READ(GFX_FLSH_CNTL_GEN6); } @@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm) info->gtt_entry_size; mem = kzalloc(mm->has_shadow_page_table ? mm->page_table_entry_size * 2 - : mm->page_table_entry_size, - GFP_ATOMIC); + : mm->page_table_entry_size, GFP_KERNEL); if (!mem) return -ENOMEM; mm->virtual_page_table = mem; @@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm; int ret; - mm = kzalloc(sizeof(*mm), GFP_ATOMIC); + mm = kzalloc(sizeof(*mm), GFP_KERNEL); if (!mm) { ret = -ENOMEM; goto fail; @@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int page_entry_num = GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; - struct page *scratch_pt; + void *scratch_pt; unsigned long mfn; int i; - void *p; if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) return -EINVAL; - scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); + scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); if (!scratch_pt) { gvt_err("fail to allocate scratch page\n"); return -ENOMEM; } - p = kmap_atomic(scratch_pt); - mfn = intel_gvt_hypervisor_virt_to_mfn(p); + mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); - kunmap_atomic(p); - __free_page(scratch_pt); + gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); + free_page((unsigned long)scratch_pt); return -EFAULT; } gtt->scratch_pt[type].page_mfn = mfn; - gtt->scratch_pt[type].page = scratch_pt; + gtt->scratch_pt[type].page = virt_to_page(scratch_pt); gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", vgpu->id, type, mfn); @@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, * scratch_pt[type] indicate the scratch pt/scratch page used by the * 'type' pt. * e.g. 
scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by - * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self + * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. */ if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { @@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, se.val64 |= PPAT_CACHED_INDEX; for (i = 0; i < page_entry_num; i++) - ops->set_entry(p, &se, i, false, 0, vgpu); + ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); } - kunmap_atomic(p); - return 0; } @@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, int intel_gvt_init_gtt(struct intel_gvt *gvt) { int ret; - void *page_addr; + void *page; gvt_dbg_core("init gtt\n"); @@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) return -ENODEV; } - gvt->gtt.scratch_ggtt_page = - alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); - if (!gvt->gtt.scratch_ggtt_page) { + page = (void *)get_zeroed_page(GFP_KERNEL); + if (!page) { gvt_err("fail to allocate scratch ggtt page\n"); return -ENOMEM; } + gvt->gtt.scratch_ggtt_page = virt_to_page(page); - page_addr = page_address(gvt->gtt.scratch_ggtt_page); - - gvt->gtt.scratch_ggtt_mfn = - intel_gvt_hypervisor_virt_to_mfn(page_addr); + gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { gvt_err("fail to translate scratch ggtt page\n"); __free_page(gvt->gtt.scratch_ggtt_page); @@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) for (offset = 0; offset < num_entries; offset++) ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); } + +/** + * intel_vgpu_reset_gtt - reset the all GTT related status + * @vgpu: a vGPU + * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset + * + * This function is called from vfio core to reset reset all + * GTT related status, including GGTT, PPGTT, scratch page. 
+ * + */ +void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr) +{ + int i; + + ppgtt_free_all_shadow_page(vgpu); + if (!dmlr) + return; + + intel_vgpu_reset_ggtt(vgpu); + + /* clear scratch page for security */ + for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { + if (vgpu->gtt.scratch_pt[i].page != NULL) + memset(page_address(vgpu->gtt.scratch_pt[i].page), + 0, PAGE_SIZE); + } +} diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index b315ab3..f88eb5e 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); extern int intel_gvt_init_gtt(struct intel_gvt *gvt); +extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr); extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 398877c..e6bf5c5 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); intel_gvt_clean_vgpu_types(gvt); + idr_destroy(&gvt->vgpu_idr); + kfree(dev_priv->gvt); dev_priv->gvt = NULL; } @@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) gvt_dbg_core("init gvt device\n"); + idr_init(&gvt->vgpu_idr); + mutex_init(&gvt->lock); gvt->dev_priv = dev_priv; @@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) ret = intel_gvt_setup_mmio_info(gvt); if (ret) - return ret; + goto out_clean_idr; ret = intel_gvt_load_firmware(gvt); if (ret) @@ -313,6 +317,8 @@ out_free_firmware: intel_gvt_free_firmware(gvt); out_clean_mmio_info: intel_gvt_clean_mmio_info(gvt); +out_clean_idr: + idr_destroy(&gvt->vgpu_idr); kfree(gvt); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 0af1701..e227caf 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -323,6 +323,7 @@ struct intel_vgpu_creation_params { int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, struct intel_vgpu_creation_params *param); +void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); void intel_vgpu_free_resource(struct intel_vgpu *vgpu); void intel_vgpu_write_fence(struct intel_vgpu *vgpu, u32 fence, u64 value); @@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_type *type); void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); +void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, + unsigned int engine_mask); void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); @@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, unsigned long *g_index); +void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, + bool primary); +void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); + int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); @@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); int intel_vgpu_emulate_opregion_request(struct 
intel_vgpu *vgpu, u32 swsci); -int setup_vgpu_mmio(struct intel_vgpu *vgpu); void populate_pvinfo_page(struct intel_vgpu *vgpu); struct intel_gvt_ops { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 5228097..ab2ea15 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset, static int new_mmio_info(struct intel_gvt *gvt, u32 offset, u32 flags, u32 size, u32 addr_mask, u32 ro_mask, u32 device, - void *read, void *write) + int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int), + int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int)) { struct intel_gvt_mmio_info *info, *p; u32 start, end, i; @@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, default: /*should not hit here*/ gvt_err("invalid forcewake offset 0x%x\n", offset); - return 1; + return -EINVAL; } } else { ack_reg_offset = FORCEWAKE_ACK_HSW_REG; @@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, return 0; } -static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset, - void *p_data, unsigned int bytes, unsigned long bitmap) -{ - struct intel_gvt_workload_scheduler *scheduler = - &vgpu->gvt->scheduler; - - vgpu->resetting = true; - - intel_vgpu_stop_schedule(vgpu); - /* - * The current_vgpu will set to NULL after stopping the - * scheduler when the reset is triggered by current vgpu. - */ - if (scheduler->current_vgpu == NULL) { - mutex_unlock(&vgpu->gvt->lock); - intel_gvt_wait_vgpu_idle(vgpu); - mutex_lock(&vgpu->gvt->lock); - } - - intel_vgpu_reset_execlist(vgpu, bitmap); - - /* full GPU reset */ - if (bitmap == 0xff) { - mutex_unlock(&vgpu->gvt->lock); - intel_vgpu_clean_gtt(vgpu); - mutex_lock(&vgpu->gvt->lock); - setup_vgpu_mmio(vgpu); - populate_pvinfo_page(vgpu); - intel_vgpu_init_gtt(vgpu); - } - - vgpu->resetting = false; - - return 0; -} - static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, - void *p_data, unsigned int bytes) + void *p_data, unsigned int bytes) { + unsigned int engine_mask = 0; u32 data; - u64 bitmap = 0; write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); if (data & GEN6_GRDOM_FULL) { gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); - bitmap = 0xff; - } - if (data & GEN6_GRDOM_RENDER) { - gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); - bitmap |= (1 << RCS); - } - if (data & GEN6_GRDOM_MEDIA) { - gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); - bitmap |= (1 << VCS); - } - if (data & GEN6_GRDOM_BLT) { - gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); - bitmap |= (1 << BCS); - } - if (data & GEN6_GRDOM_VECS) { - gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); - bitmap |= (1 << VECS); - } - if (data & GEN8_GRDOM_MEDIA2) { - gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); - if (HAS_BSD2(vgpu->gvt->dev_priv)) - bitmap |= (1 << VCS2); + engine_mask = ALL_ENGINES; + } else { + if (data & GEN6_GRDOM_RENDER) { + gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); + engine_mask |= (1 << RCS); + } + if (data & GEN6_GRDOM_MEDIA) { + gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); + engine_mask |= (1 << VCS); + } + if (data & GEN6_GRDOM_BLT) { + gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); + engine_mask |= (1 << BCS); + } + if (data & GEN6_GRDOM_VECS) { + gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); + engine_mask |= (1 << VECS); + } + 
if (data & GEN8_GRDOM_MEDIA2) { + gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); + if (HAS_BSD2(vgpu->gvt->dev_priv)) + engine_mask |= (1 << VCS2); + } } - return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); + + intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); + + return 0; } static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, @@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, +static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; @@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - int rc = 0; unsigned int id = 0; write_vreg(vgpu, offset, p_data, bytes); @@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, id = VECS; break; default: - rc = -EINVAL; - break; + return -EINVAL; } set_bit(id, (void *)vgpu->tlb_handle_pending); - return rc; + return 0; } static int ring_reset_ctl_write(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index faaae07..3f656e3 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, return NULL; } -static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, - char *buf) +static ssize_t available_instances_show(struct kobject *kobj, + struct device *dev, char *buf) { struct intel_vgpu_type *type; unsigned int num = 0; @@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev, type->fence); } -static MDEV_TYPE_ATTR_RO(available_instance); +static MDEV_TYPE_ATTR_RO(available_instances); static MDEV_TYPE_ATTR_RO(device_api); static MDEV_TYPE_ATTR_RO(description); static struct attribute *type_attrs[] = { - &mdev_type_attr_available_instance.attr, + &mdev_type_attr_available_instances.attr, &mdev_type_attr_device_api.attr, &mdev_type_attr_description.attr, NULL, @@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) struct intel_vgpu_type *type; struct device *pdev; void *gvt; + int ret; pdev = mdev_parent_dev(mdev); gvt = kdev_to_i915(pdev)->gvt; @@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) if (!type) { gvt_err("failed to find type %s to create\n", kobject_name(kobj)); - return -EINVAL; + ret = -EINVAL; + goto out; } vgpu = intel_gvt_ops->vgpu_create(gvt, type); if (IS_ERR_OR_NULL(vgpu)) { - gvt_err("create intel vgpu failed\n"); - return -EINVAL; + ret = vgpu == NULL ? 
-EFAULT : PTR_ERR(vgpu); + gvt_err("failed to create intel vgpu: %d\n", ret); + goto out; } INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); @@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", dev_name(mdev_dev(mdev))); - return 0; + ret = 0; + +out: + return ret; } static int intel_vgpu_remove(struct mdev_device *mdev) diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 09c9450..4df078b 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) goto err; - mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); - if (!mmio && !vgpu->mmio.disable_warn_untrack) { - gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n", - vgpu->id, offset, bytes, *(u32 *)p_data); - - if (offset == 0x206c) { - gvt_err("------------------------------------------\n"); - gvt_err("vgpu%d: likely triggers a gfx reset\n", - vgpu->id); - gvt_err("------------------------------------------\n"); - vgpu->mmio.disable_warn_untrack = true; - } - } - if (!intel_gvt_mmio_is_unalign(gvt, offset)) { if (WARN_ON(!IS_ALIGNED(offset, bytes))) goto err; } + mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); if (mmio) { if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) @@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, goto err; } ret = mmio->read(vgpu, offset, p_data, bytes); - } else + } else { ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); + if (!vgpu->mmio.disable_warn_untrack) { + gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", + vgpu->id, offset, bytes, *(u32 *)p_data); + + if (offset == 0x206c) { + gvt_err("------------------------------------------\n"); + gvt_err("vgpu%d: likely triggers a gfx reset\n", + vgpu->id); + gvt_err("------------------------------------------\n"); + vgpu->mmio.disable_warn_untrack = true; + } + } + } + if (ret) goto err; @@ -302,3 +303,56 @@ err: mutex_unlock(&gvt->lock); return ret; } + + +/** + * intel_vgpu_reset_mmio - reset virtual MMIO space + * @vgpu: a vGPU + * + */ +void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) +{ + struct intel_gvt *gvt = vgpu->gvt; + const struct intel_gvt_device_info *info = &gvt->device_info; + + memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); + memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); + + vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; + + /* set the bit 0:2(Core C-State ) to C0 */ + vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; +} + +/** + * intel_vgpu_init_mmio - init MMIO space + * @vgpu: a vGPU + * + * Returns: + * Zero on success, negative error code if failed + */ +int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + + vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); + if (!vgpu->mmio.vreg) + return -ENOMEM; + + vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; + + intel_vgpu_reset_mmio(vgpu); + + return 0; +} + +/** + * intel_vgpu_clean_mmio - clean MMIO space + * @vgpu: a vGPU + * + */ +void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) +{ + vfree(vgpu->mmio.vreg); + vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; +} diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 
87d5b5e..3bc620f 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, *offset; \ }) +int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); +void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu); +void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); + int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 81cd921..d9fb41a 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) vgpu->id)) return -EINVAL; - vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | - GFP_DMA32 | __GFP_ZERO, - INTEL_GVT_OPREGION_PORDER); + vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | + __GFP_ZERO, + get_order(INTEL_GVT_OPREGION_SIZE)); if (!vgpu_opregion(vgpu)->va) return -ENOMEM; @@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { map_vgpu_opregion(vgpu, false); free_pages((unsigned long)vgpu_opregion(vgpu)->va, - INTEL_GVT_OPREGION_PORDER); + get_order(INTEL_GVT_OPREGION_SIZE)); vgpu_opregion(vgpu)->va = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 0dfe789..fbd023a 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -50,8 +50,7 @@ #define INTEL_GVT_OPREGION_PARM 0x204 #define INTEL_GVT_OPREGION_PAGES 2 -#define INTEL_GVT_OPREGION_PORDER 1 -#define INTEL_GVT_OPREGION_SIZE (2 * 4096) +#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE) #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 4db2422..e91885d 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload; + struct intel_vgpu *vgpu; int event; mutex_lock(&gvt->lock); workload = scheduler->current_workload[ring_id]; + vgpu = workload->vgpu; - if (!workload->status && !workload->vgpu->resetting) { + if (!workload->status && !vgpu->resetting) { wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); @@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) for_each_set_bit(event, workload->pending_events, INTEL_GVT_EVENT_MAX) - intel_vgpu_trigger_virtual_event(workload->vgpu, - event); + intel_vgpu_trigger_virtual_event(vgpu, event); } gvt_dbg_sched("ring id %d complete workload %p status %d\n", @@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) scheduler->current_workload[ring_id] = NULL; - atomic_dec(&workload->vgpu->running_workload_num); - list_del_init(&workload->list); workload->complete(workload); + atomic_dec(&vgpu->running_workload_num); wake_up(&scheduler->workload_complete_wq); mutex_unlock(&gvt->lock); } @@ -459,11 +459,11 @@ complete: gvt_dbg_sched("will complete workload %p\n, status: %d\n", workload, workload->status); - complete_current_workload(gvt, ring_id); - if 
(workload->req) i915_gem_request_put(fetch_and_zero(&workload->req)); + complete_current_workload(gvt, ring_id); + if (need_force_wake) intel_uncore_forcewake_put(gvt->dev_priv, FORCEWAKE_ALL); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 3b30c28..2833dfa 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -113,7 +113,7 @@ struct intel_shadow_bb_entry { struct drm_i915_gem_object *obj; void *va; unsigned long len; - void *bb_start_cmd_va; + u32 *bb_start_cmd_va; }; #define workload_q_head(vgpu, ring_id) \ diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 536d2b9..7295bc8 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -35,79 +35,6 @@ #include "gvt.h" #include "i915_pvinfo.h" -static void clean_vgpu_mmio(struct intel_vgpu *vgpu) -{ - vfree(vgpu->mmio.vreg); - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; -} - -int setup_vgpu_mmio(struct intel_vgpu *vgpu) -{ - struct intel_gvt *gvt = vgpu->gvt; - const struct intel_gvt_device_info *info = &gvt->device_info; - - if (vgpu->mmio.vreg) - memset(vgpu->mmio.vreg, 0, info->mmio_size * 2); - else { - vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); - if (!vgpu->mmio.vreg) - return -ENOMEM; - } - - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; - - memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); - memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); - - vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; - - /* set the bit 0:2(Core C-State ) to C0 */ - vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; - return 0; -} - -static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu, - struct intel_vgpu_creation_params *param) -{ - struct intel_gvt *gvt = vgpu->gvt; - const struct intel_gvt_device_info *info = &gvt->device_info; - u16 *gmch_ctl; - int i; - - memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, - info->cfg_space_size); - - if (!param->primary) { - vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = - INTEL_GVT_PCI_CLASS_VGA_OTHER; - vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = - INTEL_GVT_PCI_CLASS_VGA_OTHER; - } - - /* Show guest that there isn't any stolen memory.*/ - gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); - *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); - - intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, - gvt_aperture_pa_base(gvt), true); - - vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO - | PCI_COMMAND_MEMORY - | PCI_COMMAND_MASTER); - /* - * Clear the bar upper 32bit and let guest to assign the new value - */ - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); - memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); - - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { - vgpu->cfg_space.bar[i].size = pci_resource_len( - gvt->dev_priv->drm.pdev, i * 2); - vgpu->cfg_space.bar[i].tracked = false; - } -} - void populate_pvinfo_page(struct intel_vgpu *vgpu) { /* setup the ballooning information */ @@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) if (low_avail / min_low == 0) break; gvt->types[i].low_gm_size = min_low; - gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; + gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); gvt->types[i].fence = 4; gvt->types[i].max_instance = low_avail / min_low; gvt->types[i].avail_instance = gvt->types[i].max_instance; @@ -217,7 +144,7 @@ static void 
intel_gvt_update_vgpu_types(struct intel_gvt *gvt) */ low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - gvt->gm.vgpu_allocated_low_gm_size; - high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - + high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - gvt->gm.vgpu_allocated_high_gm_size; fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - gvt->fence.vgpu_allocated_fence_num; @@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_gtt(vgpu); intel_gvt_hypervisor_detach_vgpu(vgpu); intel_vgpu_free_resource(vgpu); - clean_vgpu_mmio(vgpu); + intel_vgpu_clean_mmio(vgpu); vfree(vgpu); intel_gvt_update_vgpu_types(gvt); @@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, vgpu->gvt = gvt; bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); - setup_vgpu_cfg_space(vgpu, param); + intel_vgpu_init_cfg_space(vgpu, param->primary); - ret = setup_vgpu_mmio(vgpu); + ret = intel_vgpu_init_mmio(vgpu); if (ret) - goto out_free_vgpu; + goto out_clean_idr; ret = intel_vgpu_alloc_resource(vgpu, param); if (ret) @@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu: out_clean_vgpu_resource: intel_vgpu_free_resource(vgpu); out_clean_vgpu_mmio: - clean_vgpu_mmio(vgpu); + intel_vgpu_clean_mmio(vgpu); +out_clean_idr: + idr_remove(&gvt->vgpu_idr, vgpu->id); out_free_vgpu: vfree(vgpu); mutex_unlock(&gvt->lock); @@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, } /** - * intel_gvt_reset_vgpu - reset a virtual GPU + * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset + * @vgpu: virtual GPU + * @dmlr: vGPU Device Model Level Reset or GT Reset + * @engine_mask: engines to reset for GT reset + * + * This function is called when a user wants to reset a virtual GPU through + * device model reset or GT reset. The caller should hold the gvt lock. + * + * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset + * the whole vGPU to default state as when it is created. This vGPU function + * is required both for functionality and security concerns. The ultimate goal + * of vGPU FLR is to reuse a vGPU instance across virtual machines. When we + * assign a vGPU to a virtual machine we must issue such a reset first. + * + * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines + * (Render, Blitter, Video, Video Enhancement). They are defined by the GPU + * Spec. Unlike the FLR, a GT reset only resets particular resources of a + * vGPU per the reset request. The guest driver can issue a GT reset by + * programming the virtual GDRST register to reset a specific virtual GPU + * engine or all engines. + * + * The parameter dmlr is to identify if we will do DMLR or GT reset. + * The parameter engine_mask is to specify the engines that need to be + * reset. If the value ALL_ENGINES is given for engine_mask, it means + * the caller requests a full GT reset in which we will reset all virtual + * GPU engines. For FLR, engine_mask is ignored. + */ +void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, + unsigned int engine_mask) +{ + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; + + gvt_dbg_core("------------------------------------------\n"); + gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", + vgpu->id, dmlr, engine_mask); + vgpu->resetting = true; + + intel_vgpu_stop_schedule(vgpu); + /* + * The current_vgpu will be set to NULL after stopping the + * scheduler when the reset is triggered by current vgpu.
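To make the engine_mask contract just documented concrete, here is a caller-side sketch; the helper name is hypothetical, but the two calls mirror how gdrst_mmio_write() and intel_gvt_reset_vgpu() in this patch drive the function:

/* Hypothetical caller illustrating the dmlr/engine_mask contract. */
static void example_vgpu_resets(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);

	/* GT reset of the render engine only */
	intel_gvt_reset_vgpu_locked(vgpu, false, 1 << RCS);

	/* full device model level reset; engine_mask is ignored */
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);

	mutex_unlock(&vgpu->gvt->lock);
}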
+ */ + if (scheduler->current_vgpu == NULL) { + mutex_unlock(&gvt->lock); + intel_gvt_wait_vgpu_idle(vgpu); + mutex_lock(&gvt->lock); + } + + intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); + + /* full GPU reset or device model level reset */ + if (engine_mask == ALL_ENGINES || dmlr) { + intel_vgpu_reset_gtt(vgpu, dmlr); + intel_vgpu_reset_resource(vgpu); + intel_vgpu_reset_mmio(vgpu); + populate_pvinfo_page(vgpu); + + if (dmlr) + intel_vgpu_reset_cfg_space(vgpu); + } + + vgpu->resetting = false; + gvt_dbg_core("reset vgpu%d done\n", vgpu->id); + gvt_dbg_core("------------------------------------------\n"); +} + +/** + * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level) * @vgpu: virtual GPU * * This function is called when user wants to reset a virtual GPU. @@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, */ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) { + mutex_lock(&vgpu->gvt->lock); + intel_gvt_reset_vgpu_locked(vgpu, true, 0); + mutex_unlock(&vgpu->gvt->lock); } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 445fec9c..b2c4a0b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev) assert_forcewakes_inactive(dev_priv); - if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_poll_init(dev_priv); DRM_DEBUG_KMS("Device suspended\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 243224a..8493e19 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1012,6 +1012,8 @@ struct intel_fbc { struct work_struct underrun_work; struct intel_fbc_state_cache { + struct i915_vma *vma; + struct { unsigned int mode_flags; uint32_t hsw_bdw_pixel_rate; @@ -1025,15 +1027,14 @@ struct intel_fbc { } plane; struct { - u64 ilk_ggtt_offset; uint32_t pixel_format; unsigned int stride; - int fence_reg; - unsigned int tiling_mode; } fb; } state_cache; struct intel_fbc_reg_params { + struct i915_vma *vma; + struct { enum pipe pipe; enum plane plane; @@ -1041,10 +1042,8 @@ struct intel_fbc { } crtc; struct { - u64 ggtt_offset; uint32_t pixel_format; unsigned int stride; - int fence_reg; } fb; int cfb_size; @@ -1977,6 +1976,11 @@ struct drm_i915_private { struct i915_frontbuffer_tracking fb_tracking; + struct intel_atomic_helper { + struct llist_head free_list; + struct work_struct free_work; + } atomic_helper; + u16 orig_clock; bool mchbar_need_disable; @@ -3163,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj, return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); } -static inline unsigned long -i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o, - const struct i915_ggtt_view *view) -{ - return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view)); -} - /* i915_gem_fence_reg.c */ int __must_check i915_vma_get_fence(struct i915_vma *vma); int __must_check i915_vma_put_fence(struct i915_vma *vma); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3dd7fc6..4b23a78 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file) { - struct drm_device *dev = obj->base.dev; void *vaddr = obj->phys_handle->vaddr + args->offset; char __user 
*user_data = u64_to_user_ptr(args->data_ptr); - int ret; /* We manually control the domain here and pretend that it * remains coherent i.e. in the GTT domain, like shmem_pwrite. */ - lockdep_assert_held(&obj->base.dev->struct_mutex); - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); - if (ret) - return ret; - intel_fb_obj_invalidate(obj, ORIGIN_CPU); - if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { - unsigned long unwritten; - - /* The physical object once assigned is fixed for the lifetime - * of the obj, so we can safely drop the lock and continue - * to access vaddr. - */ - mutex_unlock(&dev->struct_mutex); - unwritten = copy_from_user(vaddr, user_data, args->size); - mutex_lock(&dev->struct_mutex); - if (unwritten) { - ret = -EFAULT; - goto out; - } - } + if (copy_from_user(vaddr, user_data, args->size)) + return -EFAULT; drm_clflush_virt_range(vaddr, args->size); - i915_gem_chipset_flush(to_i915(dev)); + i915_gem_chipset_flush(to_i915(obj->base.dev)); -out: intel_fb_obj_flush(obj, false, ORIGIN_CPU); - return ret; + return 0; } void *i915_gem_object_alloc(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index bd08814..d534a31 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -199,6 +199,7 @@ found: } /* Unbinding will emit any required flushes */ + ret = 0; while (!list_empty(&eviction_list)) { vma = list_first_entry(&eviction_list, struct i915_vma, diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index a792dcb..e924a95 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, return ret; } + trace_i915_vma_bind(vma, bind_flags); ret = vma->vm->bind_vma(vma, cache_level, bind_flags); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index dbe9fb4..8d3e515 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane) __drm_atomic_helper_plane_duplicate_state(plane, state); + intel_state->vma = NULL; + return state; } @@ -100,6 +102,24 @@ void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { + struct i915_vma *vma; + + vma = fetch_and_zero(&to_intel_plane_state(state)->vma); + + /* + * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma. + * We currently don't clear all planes during driver unload, so we have + * to be able to unpin vma here for now. + * + * Normally this can only happen during unload when kmscon is disabled + * and userspace doesn't attempt to set a framebuffer at all. 
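The invariant behind this hunk is single ownership: a plane state holds at most one pinned vma reference, and whoever fetch_and_zero()s the pointer owns the unpin. A condensed sketch of that release path follows (hypothetical helper name; the body mirrors the destroy_state code above):

/* Sketch: take-once release of a plane state's pinned vma. */
static void example_release_state_vma(struct drm_plane_state *state)
{
	struct i915_vma *vma;

	/* swap NULL in and take the old value, so a second caller
	 * observes NULL and cannot double-unpin */
	vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
	if (!vma)
		return;

	/* unpinning manipulates GGTT state, hence struct_mutex */
	mutex_lock(&state->plane->dev->struct_mutex);
	intel_unpin_fb_vma(vma);
	mutex_unlock(&state->plane->dev->struct_mutex);
}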
+ */ + if (vma) { + mutex_lock(&plane->dev->struct_mutex); + intel_unpin_fb_vma(vma); + mutex_unlock(&plane->dev->struct_mutex); + } + drm_atomic_helper_plane_destroy_state(plane, state); } diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 86ecec5..588470e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); struct edid *edid; struct i2c_adapter *i2c; + bool ret = false; BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); @@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) */ if (!is_digital) { DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); - return true; + ret = true; + } else { + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } - - DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } else { DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); } kfree(edid); - return false; + return ret; } static enum drm_connector_status diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3dc8724..f1e4a21 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2235,24 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) i915_vma_pin_fence(vma); } + i915_vma_get(vma); err: intel_runtime_pm_put(dev_priv); return vma; } -void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) +void intel_unpin_fb_vma(struct i915_vma *vma) { - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct i915_ggtt_view view; - struct i915_vma *vma; + lockdep_assert_held(&vma->vm->dev->struct_mutex); - WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); - - intel_fill_fb_ggtt_view(&view, fb, rotation); - vma = i915_gem_object_to_ggtt(obj, &view); + if (WARN_ON_ONCE(!vma)) + return; i915_vma_unpin_fence(vma); i915_gem_object_unpin_from_display_plane(vma); + i915_vma_put(vma); } static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, @@ -2585,8 +2583,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, * We only keep the x/y offsets, so push all of the * gtt offset into the x/y offsets. 
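For context on the argument-order fix in _intel_adjust_tile_offset() above: the helper folds a tile-aligned linear offset into x/y coordinates, so transposing tile_size with tile_width/tile_height silently corrupts the remap. The underlying arithmetic looks roughly like the following sketch (assumed parameter meanings, not the driver's exact helper):

/* Sketch: fold a linear offset (in tiles) into x/y pixel coordinates.
 * Tiles are tile_width x tile_height pixels, pitch_tiles tiles per row. */
static void example_fold_tiles(int *x, int *y,
			       unsigned int tile_width,
			       unsigned int tile_height,
			       unsigned int pitch_tiles,
			       unsigned int offset_tiles)
{
	*y += (offset_tiles / pitch_tiles) * tile_height;
	*x += (offset_tiles % pitch_tiles) * tile_width;
}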
*/ - _intel_adjust_tile_offset(&x, &y, tile_size, - tile_width, tile_height, pitch_tiles, + _intel_adjust_tile_offset(&x, &y, + tile_width, tile_height, + tile_size, pitch_tiles, gtt_offset_rotated * tile_size, 0); gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; @@ -2746,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; - struct intel_crtc *i; struct drm_i915_gem_object *obj; struct drm_plane *primary = intel_crtc->base.primary; struct drm_plane_state *plane_state = primary->state; @@ -2771,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, * an fb with another CRTC instead */ for_each_crtc(dev, c) { - i = to_intel_crtc(c); + struct intel_plane_state *state; if (c == &intel_crtc->base) continue; - if (!i->active) + if (!to_intel_crtc(c)->active) continue; - fb = c->primary->fb; - if (!fb) + state = to_intel_plane_state(c->primary->state); + if (!state->vma) continue; - obj = intel_fb_obj(fb); - if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) { + if (intel_plane_ggtt_offset(state) == plane_config->base) { + fb = c->primary->fb; drm_framebuffer_reference(fb); goto valid_fb; } @@ -2805,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, return; valid_fb: + mutex_lock(&dev->struct_mutex); + intel_state->vma = + intel_pin_and_fence_fb_obj(fb, primary->state->rotation); + mutex_unlock(&dev->struct_mutex); + if (IS_ERR(intel_state->vma)) { + DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", + intel_crtc->pipe, PTR_ERR(intel_state->vma)); + + intel_state->vma = NULL; + drm_framebuffer_unreference(fb); + return; + } + plane_state->src_x = 0; plane_state->src_y = 0; plane_state->src_w = fb->width << 16; @@ -2967,6 +2978,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) unsigned int rotation = plane_state->base.rotation; int ret; + if (!plane_state->base.visible) + return 0; + /* Rotate src coordinates to match rotated GTT view */ if (drm_rotation_90_or_270(rotation)) drm_rect_rotate(&plane_state->base.src, @@ -3097,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_GEN(dev_priv) >= 4) { I915_WRITE(DSPSURF(plane), - intel_fb_gtt_offset(fb, rotation) + + intel_plane_ggtt_offset(plane_state) + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); } else { I915_WRITE(DSPADDR(plane), - intel_fb_gtt_offset(fb, rotation) + + intel_plane_ggtt_offset(plane_state) + intel_crtc->dspaddr_offset); } POSTING_READ(reg); @@ -3200,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSURF(plane), - intel_fb_gtt_offset(fb, rotation) + + intel_plane_ggtt_offset(plane_state) + intel_crtc->dspaddr_offset); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { I915_WRITE(DSPOFFSET(plane), (y << 16) | x); @@ -3223,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, } } -u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, - unsigned int rotation) -{ - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct i915_ggtt_view view; - struct i915_vma *vma; - - intel_fill_fb_ggtt_view(&view, fb, rotation); - - vma = i915_gem_object_to_ggtt(obj, &view); - if (WARN(!vma, "ggtt vma for display object not 
found! (view=%u)\n", - view.type)) - return -1; - - return i915_ggtt_offset(vma); -} - static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) { struct drm_device *dev = intel_crtc->base.dev; @@ -3434,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane, } I915_WRITE(PLANE_SURF(pipe, 0), - intel_fb_gtt_offset(fb, rotation) + surf_addr); + intel_plane_ggtt_offset(plane_state) + surf_addr); POSTING_READ(PLANE_SURF(pipe, 0)); } @@ -6846,6 +6843,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) } state = drm_atomic_state_alloc(crtc->dev); + if (!state) { + DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", + crtc->base.id, crtc->name); + return; + } + state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; /* Everything's already locked, -EDEADLK can't happen. */ @@ -11243,6 +11246,7 @@ found: } old->restore_state = restore_state; + drm_atomic_state_put(state); /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev_priv, intel_crtc->pipe); @@ -11522,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) flush_work(&work->mmio_work); mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(work->old_fb, primary->state->rotation); + intel_unpin_fb_vma(work->old_vma); i915_gem_object_put(work->pending_flip_obj); mutex_unlock(&dev->struct_mutex); @@ -12232,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto cleanup_pending; } - work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation); - work->gtt_offset += intel_crtc->dspaddr_offset; + work->old_vma = to_intel_plane_state(primary->state)->vma; + to_intel_plane_state(primary->state)->vma = vma; + + work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset; work->rotation = crtc->primary->state->rotation; /* @@ -12287,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, cleanup_request: i915_add_request_no_flush(request); cleanup_unpin: - intel_unpin_fb_obj(fb, crtc->primary->state->rotation); + to_intel_plane_state(primary->state)->vma = work->old_vma; + intel_unpin_fb_vma(vma); cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); unlock: @@ -14512,8 +14519,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence, break; case FENCE_FREE: - drm_atomic_state_put(&state->base); - break; + { + struct intel_atomic_helper *helper = + &to_i915(state->base.dev)->atomic_helper; + + if (llist_add(&state->freed, &helper->free_list)) + schedule_work(&helper->free_work); + break; + } } return NOTIFY_DONE; @@ -14774,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane, DRM_DEBUG_KMS("failed to pin object\n"); return PTR_ERR(vma); } + + to_intel_plane_state(new_state)->vma = vma; } return 0; @@ -14792,19 +14807,12 @@ void intel_cleanup_plane_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct drm_i915_private *dev_priv = to_i915(plane->dev); - struct intel_plane_state *old_intel_state; - struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); - struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); - - old_intel_state = to_intel_plane_state(old_state); - - if (!obj && !old_obj) - return; + struct i915_vma *vma; - if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || - !INTEL_INFO(dev_priv)->cursor_needs_physical)) - intel_unpin_fb_obj(old_state->fb, old_state->rotation); + /* Should only be called after a successful intel_prepare_plane_fb()! 
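The FENCE_FREE branch above defers the final drm_atomic_state_put() to a worker by pushing states onto a lock-free llist; llist_add() returns true only when the list was empty, so the work is scheduled once per batch rather than once per state. A condensed sketch of the producer/consumer pair (helper struct simplified from dev_priv->atomic_helper):

/* Condensed sketch: deferred freeing of atomic states via llist + work. */
struct example_free_helper {
	struct llist_head free_list;
	struct work_struct free_work;
};

static void example_free_worker(struct work_struct *work)
{
	struct example_free_helper *h =
		container_of(work, struct example_free_helper, free_work);
	struct intel_atomic_state *state, *next;
	struct llist_node *freed = llist_del_all(&h->free_list);

	/* _safe variant: each put may free the node we stand on */
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

static void example_queue_free(struct example_free_helper *h,
			       struct intel_atomic_state *state)
{
	if (llist_add(&state->freed, &h->free_list))
		schedule_work(&h->free_work);	/* first item only */
}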
*/ + vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma); + if (vma) + intel_unpin_fb_vma(vma); } int @@ -15146,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane, if (!obj) addr = 0; else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) - addr = i915_gem_object_ggtt_offset(obj, NULL); + addr = intel_plane_ggtt_offset(state); else addr = obj->phys_handle->busaddr; @@ -16392,6 +16400,18 @@ fail: drm_modeset_acquire_fini(&ctx); } +static void intel_atomic_helper_free_state(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), atomic_helper.free_work); + struct intel_atomic_state *state, *next; + struct llist_node *freed; + + freed = llist_del_all(&dev_priv->atomic_helper.free_list); + llist_for_each_entry_safe(state, next, freed, freed) + drm_atomic_state_put(&state->base); +} + int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -16411,6 +16431,9 @@ int intel_modeset_init(struct drm_device *dev) dev->mode_config.funcs = &intel_mode_funcs; + INIT_WORK(&dev_priv->atomic_helper.free_work, + intel_atomic_helper_free_state); + intel_init_quirks(dev); intel_init_pm(dev_priv); @@ -17024,47 +17047,19 @@ void intel_display_resume(struct drm_device *dev) if (ret) DRM_ERROR("Restoring old state failed with %i\n", ret); - drm_atomic_state_put(state); + if (state) + drm_atomic_state_put(state); } void intel_modeset_gem_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_crtc *c; - struct drm_i915_gem_object *obj; intel_init_gt_powersave(dev_priv); intel_modeset_init_hw(dev); intel_setup_overlay(dev_priv); - - /* - * Make sure any fbs we allocated at startup are properly - * pinned & fenced. When we do the allocation it's too early - * for this. 
- */ - for_each_crtc(dev, c) { - struct i915_vma *vma; - - obj = intel_fb_obj(c->primary->fb); - if (obj == NULL) - continue; - - mutex_lock(&dev->struct_mutex); - vma = intel_pin_and_fence_fb_obj(c->primary->fb, - c->primary->state->rotation); - mutex_unlock(&dev->struct_mutex); - if (IS_ERR(vma)) { - DRM_ERROR("failed to pin boot fb on pipe %d\n", - to_intel_crtc(c)->pipe); - drm_framebuffer_unreference(c->primary->fb); - c->primary->fb = NULL; - c->primary->crtc = c->primary->state->crtc = NULL; - update_state_fb(c->primary); - c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); - } - } } int intel_connector_register(struct drm_connector *connector) @@ -17094,6 +17089,9 @@ void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + flush_work(&dev_priv->atomic_helper.free_work); + WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); + intel_disable_gt_powersave(dev_priv); /* diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd132c2..03a2112 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -370,11 +370,14 @@ struct intel_atomic_state { struct skl_wm_values wm_results; struct i915_sw_fence commit_ready; + + struct llist_node freed; }; struct intel_plane_state { struct drm_plane_state base; struct drm_rect clip; + struct i915_vma *vma; struct { u32 offset; @@ -1044,6 +1047,7 @@ struct intel_flip_work { struct work_struct mmio_work; struct drm_crtc *crtc; + struct i915_vma *old_vma; struct drm_framebuffer *old_fb; struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; @@ -1271,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx); struct i915_vma * intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); -void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); +void intel_unpin_fb_vma(struct i915_vma *vma); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, @@ -1360,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); -u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation); +static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state) +{ + return i915_ggtt_offset(state->vma); +} u32 skl_plane_ctl_format(uint32_t pixel_format); u32 skl_plane_ctl_tiling(uint64_t fb_modifier); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 62f215b..f3a1d6a 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) if (IS_I945GM(dev_priv)) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; - fbc_ctl |= params->fb.fence_reg; + fbc_ctl |= params->vma->fence->id; I915_WRITE(FBC_CONTROL, fbc_ctl); } @@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv) else dpfc_ctl |= DPFC_CTL_LIMIT_1X; - if (params->fb.fence_reg != I915_FENCE_REG_NONE) { - dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; + if (params->vma->fence) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id; I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); 
} else { I915_WRITE(DPFC_FENCE_YOFF, 0); @@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) break; } - if (params->fb.fence_reg != I915_FENCE_REG_NONE) { + if (params->vma->fence) { dpfc_ctl |= DPFC_CTL_FENCE_EN; if (IS_GEN5(dev_priv)) - dpfc_ctl |= params->fb.fence_reg; + dpfc_ctl |= params->vma->fence->id; if (IS_GEN6(dev_priv)) { I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + SNB_CPU_FENCE_ENABLE | + params->vma->fence->id); I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); } @@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) } I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); - I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); + I915_WRITE(ILK_FBC_RT_BASE, + i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID); /* enable it... */ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) break; } - if (params->fb.fence_reg != I915_FENCE_REG_NONE) { + if (params->vma->fence) { dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + SNB_CPU_FENCE_ENABLE | + params->vma->fence->id); I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); } else { I915_WRITE(SNB_DPFC_CTL_SA,0); @@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) return effective_w <= max_w && effective_h <= max_h; } -/* XXX replace me when we have VMA tracking for intel_plane_state */ -static int get_fence_id(struct drm_framebuffer *fb) -{ - struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL); - - return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE; -} - static void intel_fbc_update_state_cache(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) @@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj; + + cache->vma = NULL; cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) @@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, if (!cache->plane.visible) return; - obj = intel_fb_obj(fb); - - /* FIXME: We lack the proper locking here, so only run this on the - * platforms that need. */ - if (IS_GEN(dev_priv, 5, 6)) - cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL); cache->fb.pixel_format = fb->pixel_format; cache->fb.stride = fb->pitches[0]; - cache->fb.fence_reg = get_fence_id(fb); - cache->fb.tiling_mode = i915_gem_object_get_tiling(obj); + + cache->vma = plane_state->vma; } static bool intel_fbc_can_activate(struct intel_crtc *crtc) @@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } - if (!cache->plane.visible) { + if (!cache->vma) { fbc->no_fbc_reason = "primary plane not visible"; return false; } @@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * so have no fence associated with it) due to aperture constaints * at the time of pinning. 
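With the vma now cached directly, the FBC eligibility test this comment motivates collapses to a pointer check; a missing fence covers both the untiled case and the pinned-without-fence case. A sketch (field names per the state cache above, helper name hypothetical):

/* Sketch: FBC eligibility once the cache tracks the vma. */
static bool example_fbc_fence_ok(const struct intel_fbc_state_cache *cache)
{
	if (!cache->vma)
		return false;	/* primary plane not visible */

	/* FBC relies on the fence register taken at pin time to track
	 * CPU writes to an X-tiled framebuffer */
	return cache->vma->fence != NULL;
}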
*/ - if (cache->fb.tiling_mode != I915_TILING_X || - cache->fb.fence_reg == I915_FENCE_REG_NONE) { + if (!cache->vma->fence) { fbc->no_fbc_reason = "framebuffer not tiled or fenced"; return false; } @@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, * zero. */ memset(params, 0, sizeof(*params)); + params->vma = cache->vma; + params->crtc.pipe = crtc->pipe; params->crtc.plane = crtc->plane; params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); params->fb.pixel_format = cache->fb.pixel_format; params->fb.stride = cache->fb.stride; - params->fb.fence_reg = cache->fb.fence_reg; params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); - - params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset; } static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index beb0898..f4a8c4f 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_destroy_fbi: drm_fb_helper_release_fbi(helper); out_unpin: - intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); + intel_unpin_fb_vma(vma); out_unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) if (ifbdev->fb) { mutex_lock(&ifbdev->helper.dev->struct_mutex); - intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); + intel_unpin_fb_vma(ifbdev->vma); mutex_unlock(&ifbdev->helper.dev->struct_mutex); drm_framebuffer_remove(&ifbdev->fb->base); @@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + if (!ifbdev) + return; + ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index d4961fa..beabc17 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, uint32_t *batch, uint32_t index) { - struct drm_i915_private *dev_priv = engine->i915; uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); - /* - * WaDisableLSQCROPERFforOCL:kbl - * This WA is implemented in skl_init_clock_gating() but since - * this batch updates GEN8_L3SQCREG4 with default value we need to - * set this bit here to retain the WA during flush. - */ - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) - l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; - wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT)); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index aeb637d..91cb4c4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FENCE_DEST_SLM_DISABLE); - /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes - * involving this register should also be added to WA batch as required. 
- */ - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) - /* WaDisableLSQCROPERFforOCL:kbl */ - I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | - GEN8_LQSC_RO_PERF_DIS); - /* WaToEnableHwFixForPushConstHWBug:kbl */ if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 8f131a0..242a73e 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane, I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); I915_WRITE(PLANE_SURF(pipe, plane), - intel_fb_gtt_offset(fb, rotation) + surf_addr); + intel_plane_ggtt_offset(plane_state) + surf_addr); POSTING_READ(PLANE_SURF(pipe, plane)); } @@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane, I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); I915_WRITE(SPCNTR(pipe, plane), sprctl); I915_WRITE(SPSURF(pipe, plane), - intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); + intel_plane_ggtt_offset(plane_state) + sprsurf_offset); POSTING_READ(SPSURF(pipe, plane)); } @@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane, I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); I915_WRITE(SPRSURF(pipe), - intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); + intel_plane_ggtt_offset(plane_state) + sprsurf_offset); POSTING_READ(SPRSURF(pipe)); } @@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane, I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); I915_WRITE(DVSSURF(pipe), - intel_fb_gtt_offset(fb, rotation) + dvssurf_offset); + intel_plane_ggtt_offset(plane_state) + dvssurf_offset); POSTING_READ(DVSSURF(pipe)); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 14ff876..686a580 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -345,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, { struct adreno_platform_config *config = pdev->dev.platform_data; struct msm_gpu *gpu = &adreno_gpu->base; - struct msm_mmu *mmu; int ret; adreno_gpu->funcs = funcs; @@ -385,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } - mmu = gpu->aspace->mmu; - if (mmu) { + if (gpu->aspace && gpu->aspace->mmu) { + struct msm_mmu *mmu = gpu->aspace->mmu; ret = mmu->funcs->attach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 5f6cd87..c396d45 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { - int i; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct drm_plane *plane; - struct drm_plane_state *plane_state; - - for_each_plane_in_state(state, plane, plane_state, i) - mdp5_plane_complete_commit(plane, plane_state); if (mdp5_kms->smp) mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 17b0cc1..cdfc63d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -104,8 +104,6 @@ struct mdp5_plane_state { /* 
assigned by crtc blender */ enum mdp_mixer_stage_id stage; - - bool pending : 1; }; #define to_mdp5_plane_state(x) \ container_of(x, struct mdp5_plane_state, base) @@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); -void mdp5_plane_complete_commit(struct drm_plane *plane, - struct drm_plane_state *state); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index c099da7..25d9d0a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p, drm_printf(p, "\tzpos=%u\n", pstate->zpos); drm_printf(p, "\talpha=%u\n", pstate->alpha); drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); - drm_printf(p, "\tpending=%u\n", pstate->pending); } static void mdp5_plane_reset(struct drm_plane *plane) @@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) if (mdp5_state && mdp5_state->base.fb) drm_framebuffer_reference(mdp5_state->base.fb); - mdp5_state->pending = false; - return &mdp5_state->base; } @@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, DBG("%s: check (%d -> %d)", plane->name, plane_enabled(old_state), plane_enabled(state)); - /* We don't allow faster-than-vblank updates.. if we did add this - * some day, we would need to disallow in cases where hwpipe - * changes - */ - if (WARN_ON(to_mdp5_plane_state(old_state)->pending)) - return -EBUSY; - max_width = config->hw->lm.max_width << 16; max_height = config->hw->lm.max_height << 16; @@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct drm_plane_state *state = plane->state; - struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); DBG("%s: update", plane->name); - mdp5_state->pending = true; - if (plane_enabled(state)) { int ret; @@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) return pstate->hwpipe->flush_mask; } -/* called after vsync in thread context */ -void mdp5_plane_complete_commit(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); - - pstate->pending = false; -} - /* initialize plane */ struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) { diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index d8bc59c..8098677 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { + if (!priv->aspace[id]) + continue; msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id], msm_obj->sgt); } diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index 74856a8..e64f524 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c @@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) uint32_t mpllP; pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); + mpllP = (mpllP >> 8) & 0xf; if (!mpllP) mpllP = 4; @@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct 
drm_device *dev, enum nvbios_pll_type plltype) uint32_t clock; pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); - return clock; + return clock / 1000; } ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index cef08da..6a15776 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev) return ret; /* enable polling for external displays */ - drm_kms_helper_poll_enable(dev); + if (!dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); /* enable hotplug interrupts */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 59348fc..bc85a45 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev) pci_set_master(pdev); ret = nouveau_do_resume(drm_dev, true); - drm_kms_helper_poll_enable(drm_dev); + + if (!drm_dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(drm_dev); + /* do magic */ nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8d5ed5b..42c1fa5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -165,6 +165,8 @@ struct nouveau_drm { struct backlight_device *backlight; struct list_head bl_connectors; struct work_struct hpd_work; + struct work_struct fbcon_work; + int fbcon_new_state; #ifdef CONFIG_ACPI struct notifier_block acpi_nb; #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 2f2a3dc..fa2d0a9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { .fb_probe = nouveau_fbcon_create, }; +static void +nouveau_fbcon_set_suspend_work(struct work_struct *work) +{ + struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work); + int state = READ_ONCE(drm->fbcon_new_state); + + if (state == FBINFO_STATE_RUNNING) + pm_runtime_get_sync(drm->dev->dev); + + console_lock(); + if (state == FBINFO_STATE_RUNNING) + nouveau_fbcon_accel_restore(drm->dev); + drm_fb_helper_set_suspend(&drm->fbcon->helper, state); + if (state != FBINFO_STATE_RUNNING) + nouveau_fbcon_accel_save_disable(drm->dev); + console_unlock(); + + if (state == FBINFO_STATE_RUNNING) { + pm_runtime_mark_last_busy(drm->dev->dev); + pm_runtime_put_sync(drm->dev->dev); + } +} + void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) { struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->fbcon) { - console_lock(); - if (state == FBINFO_STATE_RUNNING) - nouveau_fbcon_accel_restore(dev); - drm_fb_helper_set_suspend(&drm->fbcon->helper, state); - if (state != FBINFO_STATE_RUNNING) - nouveau_fbcon_accel_save_disable(dev); - console_unlock(); - } + + if (!drm->fbcon) + return; + + drm->fbcon_new_state = state; + /* Since runtime resume can happen as a result of a sysfs operation, + * it's possible we already have the console locked. 
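That lock-ordering hazard is the whole point of the new worker: stash the requested state, then let process context take console_lock. A condensed sketch of the hand-off (struct and names simplified from the hunk above):

/* Condensed sketch: deferring console_lock work to a workqueue. */
struct example_fbcon {
	struct work_struct work;
	int new_state;		/* latest requested state */
};

static void example_fbcon_worker(struct work_struct *work)
{
	struct example_fbcon *fb =
		container_of(work, struct example_fbcon, work);
	int state = READ_ONCE(fb->new_state);

	console_lock();
	/* ... apply 'state' to the fb helper ... */
	console_unlock();
}

static void example_fbcon_set_state(struct example_fbcon *fb, int state)
{
	fb->new_state = state;		/* rapid updates coalesce */
	schedule_work(&fb->work);	/* safe while console_lock is held */
}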
So handle fbcon + * init/deinit from a separate work thread + */ + schedule_work(&drm->fbcon_work); } int @@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev) return -ENOMEM; drm->fbcon = fbcon; + INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index ccdce1b..d5e58a3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -99,6 +99,7 @@ struct nv84_fence_priv { struct nouveau_bo *bo; struct nouveau_bo *bo_gart; u32 *suspend; + struct mutex mutex; }; int nv84_fence_context_new(struct nouveau_channel *); diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h index 187ecdb..21a5775 100644 --- a/drivers/gpu/drm/nouveau/nouveau_led.h +++ b/drivers/gpu/drm/nouveau/nouveau_led.h @@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev) } /* nouveau_led.c */ -#if IS_ENABLED(CONFIG_LEDS_CLASS) +#if IS_REACHABLE(CONFIG_LEDS_CLASS) int nouveau_led_init(struct drm_device *dev); void nouveau_led_suspend(struct drm_device *dev); void nouveau_led_resume(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 08f9c6f..1fba386 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { /* block access to objects not created via this interface */ owner = argv->v0.owner; - if (argv->v0.object == 0ULL) + if (argv->v0.object == 0ULL && + argv->v0.type != NVIF_IOCTL_V0_DEL) argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ else argv->v0.owner = NVDRM_OBJECT_USIF; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2c2c645..32097fd 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) } } + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (crtc->state->event) + drm_crtc_vblank_get(crtc); + } + /* Update plane(s).
*/ for_each_plane_in_state(state, plane, plane_state, i) { struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state); @@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) drm_crtc_send_vblank_event(crtc, crtc->state->event); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); crtc->state->event = NULL; + drm_crtc_vblank_put(crtc); } } diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 52b87ae..f0b322b 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan) struct nv84_fence_chan *fctx = chan->fence; nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); + mutex_lock(&priv->mutex); nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); nouveau_bo_vma_del(priv->bo, &fctx->vma); + mutex_unlock(&priv->mutex); nouveau_fence_context_del(&fctx->base); chan->fence = NULL; nouveau_fence_context_free(&fctx->base); @@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan) fctx->base.sync32 = nv84_fence_sync32; fctx->base.sequence = nv84_fence_read(chan); + mutex_lock(&priv->mutex); ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); if (ret == 0) { ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, &fctx->vma_gart); } + mutex_unlock(&priv->mutex); if (ret) nv84_fence_context_del(chan); @@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm) priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); priv->base.uevent = true; + mutex_init(&priv->mutex); + /* Use VRAM if there is any ; otherwise fallback to system memory */ domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : /* diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c index 6f0436d..f8f2f16 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c @@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1) ); } for (i = 0; i < size; i++) - nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]); + nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]); for (; i < 0x60; i++) nvkm_wr32(device, 0x61c440 + soff, (i << 8)); nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index 567466f..0db8efb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c @@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device) case 0x94: case 0x96: case 0x98: - case 0xaa: - case 0xac: return true; default: break; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 00ea000..30bd4a6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -97,9 +97,10 @@ * 2.46.0 - Add PFP_SYNC_ME support on evergreen * 2.47.0 - Add UVD_NO_OP register support * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI + * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 48 +#define KMS_DRIVER_MINOR 49 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -366,11 +367,10 @@ static void radeon_pci_shutdown(struct pci_dev *pdev) { /* if we are running in a VM, make sure the 
device - * torn down properly on reboot/shutdown. - * unfortunately we can't detect certain - * hypervisors so just do this all the time. + * is torn down properly on reboot/shutdown */ - radeon_pci_remove(pdev); + if (radeon_device_is_virtual()) + radeon_pci_remove(pdev); } static int radeon_pmops_suspend(struct device *dev) diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 0bcffd8..96683f5 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, man = &rdev->mman.bdev.man[TTM_PL_VRAM]; - args->vram_size = rdev->mc.real_vram_size; - args->vram_visible = (u64)man->size << PAGE_SHIFT; + args->vram_size = (u64)man->size << PAGE_SHIFT; + args->vram_visible = rdev->mc.visible_vram_size; args->vram_visible -= rdev->vram_pin_size; args->gart_size = rdev->mc.gtt_size; args->gart_size -= rdev->gart_pin_size; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index e8a38d2..4147768 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -114,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin"); MODULE_FIRMWARE("radeon/hainan_rlc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); +MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); + +MODULE_FIRMWARE("radeon/si58_mc.bin"); static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); static void si_pcie_gen3_enable(struct radeon_device *rdev); @@ -1650,6 +1653,8 @@ static int si_init_microcode(struct radeon_device *rdev) int err; int new_fw = 0; bool new_smc = false; + bool si58_fw = false; + bool banks2_fw = false; DRM_DEBUG("\n"); @@ -1727,10 +1732,11 @@ static int si_init_microcode(struct radeon_device *rdev) ((rdev->pdev->device == 0x6660) || (rdev->pdev->device == 0x6663) || (rdev->pdev->device == 0x6665) || - (rdev->pdev->device == 0x6667))) || - ((rdev->pdev->revision == 0xc3) && - (rdev->pdev->device == 0x6665))) + (rdev->pdev->device == 0x6667)))) new_smc = true; + else if ((rdev->pdev->revision == 0xc3) && + (rdev->pdev->device == 0x6665)) + banks2_fw = true; new_chip_name = "hainan"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1742,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev) default: BUG(); } + /* this memory configuration requires special firmware */ + if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) + si58_fw = true; + DRM_INFO("Loading %s Microcode\n", new_chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); @@ -1845,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev) } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); + if (si58_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); @@ -1876,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev) } } - if (new_smc) + if (banks2_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin"); + else if (new_smc) snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); else snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index
13ba73f..2944916 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -3008,17 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->device == 0x6817) || (rdev->pdev->device == 0x6806)) max_mclk = 120000; - } else if (rdev->family == CHIP_OLAND) { - if ((rdev->pdev->revision == 0xC7) || - (rdev->pdev->revision == 0x80) || - (rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - (rdev->pdev->revision == 0x87) || - (rdev->pdev->device == 0x6604) || - (rdev->pdev->device == 0x6605)) { - max_sclk = 75000; - max_mclk = 80000; - } } else if (rdev->family == CHIP_HAINAN) { if ((rdev->pdev->revision == 0x81) || (rdev->pdev->revision == 0x83) || @@ -3027,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->device == 0x6665) || (rdev->pdev->device == 0x6667)) { max_sclk = 75000; - max_mclk = 80000; } } /* Apply dpm quirks */ diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index a0fd3e6..7aadce1 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc, } - __drm_atomic_helper_crtc_destroy_state(state); + drm_atomic_helper_crtc_destroy_state(crtc, state); } static const struct drm_crtc_funcs vc4_crtc_funcs = { diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index db92077..ab30169 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) args->shader_rec_count); struct vc4_bo *bo; - if (uniforms_offset < shader_rec_offset || + if (shader_rec_offset < args->bin_cl_size || + uniforms_offset < shader_rec_offset || exec_size < uniforms_offset || args->shader_rec_count >= (UINT_MAX / sizeof(struct vc4_shader_state)) || temp_size < exec_size) { DRM_ERROR("overflow in exec arguments\n"); + ret = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 08886a3..5cdd003 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, } ret = vc4_full_res_bounds_check(exec, *obj, surf); - if (!ret) + if (ret) return ret; return 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c index dd21f95..cde9f37 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c @@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, info->fbops = &virtio_gpufb_ops; info->pixmap.flags = FB_PIXMAP_SYSTEM; - info->screen_base = obj->vmap; + info->screen_buffer = obj->vmap; info->screen_size = obj->gem_base.size; drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, &vfbdev->helper,
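
Notes on a few of the hunks above; the code blocks below are illustrative sketches, not authoritative kernel sources.

In the dispnv04/hw.c change, nouveau_hw_get_clock() previously used the whole PCI config dword read at offset 0x6c as the MPLL post-divider; the fix isolates the divider field in bits 11:8 before applying the default of 4, and the second hunk scales the raw clock register at 0x4c down by 1000 so the function returns the same units as the PLL path. A minimal sketch of the field extraction (the config value here is made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t cfg = 0x00000300;         /* hypothetical config dword from 0x6c */
		uint32_t mpllP = (cfg >> 8) & 0xf; /* divider field lives in bits 11:8 */

		if (!mpllP)                        /* a zero field means "use 4" */
			mpllP = 4;
		printf("P divider = %u\n", mpllP); /* prints 3 for this value */
		return 0;
	}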
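
The nouveau_fbcon.c rework addresses a lock-ordering hazard: runtime resume can be triggered by a sysfs operation that already holds the console lock, so calling console_lock() directly from nouveau_fbcon_set_suspend() could deadlock. Instead, the caller publishes the requested state and a worker applies it later from clean process context. A stripped-down sketch of that hand-off pattern (demo_* and apply_state() are hypothetical names; the real code uses fbcon_work and fbcon_new_state as shown in the diff):

	#include <linux/console.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	extern void apply_state(int state);          /* hypothetical stand-in */

	struct demo {
		struct work_struct work;             /* INIT_WORK(&d->work, demo_work_fn) at init */
		int new_state;                       /* written by caller, read by worker */
	};

	static void demo_work_fn(struct work_struct *work)
	{
		struct demo *d = container_of(work, struct demo, work);
		int state = READ_ONCE(d->new_state); /* the latest request wins */

		console_lock();                      /* safe here: no inherited locks */
		apply_state(state);
		console_unlock();
	}

	static void demo_request(struct demo *d, int state)
	{
		d->new_state = state;                /* publish before scheduling */
		schedule_work(&d->work);
	}

Repeated calls simply overwrite new_state, so the worker always applies the most recent request, which is the behavior wanted for suspend/resume.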
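
The one-line nouveau_led.h change is a link-correctness fix: IS_ENABLED(CONFIG_LEDS_CLASS) is true even when LEDS_CLASS=m, so a built-in nouveau would reference symbols that only exist in a module and fail to link. IS_REACHABLE() is true only when the option's symbols are actually callable from the current object. The usual header shape this guards looks like the following (the stub bodies are illustrative, not copied from the file):

	struct drm_device;

	#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	int nouveau_led_init(struct drm_device *dev);
	void nouveau_led_suspend(struct drm_device *dev);
	void nouveau_led_resume(struct drm_device *dev);
	#else
	/* no-op stubs when the LED core cannot be reached */
	static inline int nouveau_led_init(struct drm_device *dev) { return 0; }
	static inline void nouveau_led_suspend(struct drm_device *dev) { }
	static inline void nouveau_led_resume(struct drm_device *dev) { }
	#endif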
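
In nv84_fence.c, every channel maps and unmaps VMAs in the two buffer objects shared by all channels (priv->bo and priv->bo_gart), so concurrent channel creation and teardown raced on the BOs' VMA lists. The new priv->mutex serializes both paths; condensed from the hunks above:

	mutex_init(&priv->mutex);            /* once, in nv84_fence_create() */

	/* nv84_fence_context_new(): map under the lock */
	mutex_lock(&priv->mutex);
	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
	if (ret == 0)
		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, &fctx->vma_gart);
	mutex_unlock(&priv->mutex);

	/* nv84_fence_context_del(): unmap under the same lock */
	mutex_lock(&priv->mutex);
	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
	nouveau_bo_vma_del(priv->bo, &fctx->vma);
	mutex_unlock(&priv->mutex);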
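
The hdagt215.c fix is a copy-paste bug in the ELD upload loop: every iteration wrote args->v0.data[0], so the codec saw the first ELD byte repeated size times. With the loop index applied to the payload as well, each 32-bit write carries the slot number in bits 15:8 and the matching ELD byte in bits 7:0, and the remaining slots are zero-padded:

	for (i = 0; i < size; i++)    /* one ELD byte per write */
		nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
	for (; i < 0x60; i++)         /* clear the rest of the 0x60-slot buffer */
		nvkm_wr32(device, 0x61c440 + soff, (i << 8));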
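
Two small but consequential fixes sit in the vc4 hunks. vc4_get_bcl() validates user-controlled sizes by requiring each derived offset to be greater than or equal to its predecessor, so any unsigned wrap-around breaks the chain; the patch adds the missing first link (shader_rec_offset against bin_cl_size) and also sets ret = -EINVAL before goto fail, since that path previously bailed out while still returning success. A self-contained illustration of the monotonic-chain idea (check_layout() is a made-up name):

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Each region is laid out after the previous one, so every offset must
	 * be >= its predecessor; an overflowing addition wraps and breaks the
	 * chain, which the comparisons catch. */
	static int check_layout(uint32_t bin_cl_size, uint32_t shader_rec_offset,
				uint32_t uniforms_offset, uint32_t exec_size)
	{
		if (shader_rec_offset < bin_cl_size ||
		    uniforms_offset < shader_rec_offset ||
		    exec_size < uniforms_offset)
			return -EINVAL;    /* "overflow in exec arguments" */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_layout(0x100, 0x200, 0x300, 0x400));   /* 0: sane layout */
		printf("%d\n", check_layout(0xfffffff0, 0x10, 0x20, 0x30)); /* -EINVAL: wrapped */
		return 0;
	}

The vc4_render_cl.c hunk fixes the inverted counterpart: with "if (!ret) return ret;" a failed vc4_full_res_bounds_check() fell through to "return 0;" and the error was swallowed; "if (ret) return ret;" propagates it.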