author     Nicolai Hähnle <nicolai.haehnle@amd.com>      2017-03-23 19:34:11 +0100
committer  Alex Deucher <alexander.deucher@amd.com>      2017-03-29 23:55:32 -0400
commit     23e0563e48f7e9e98003df5b43d6a48e162782c6 (patch)
tree       29ecc7cfb4a1725370da475d0960720887868dd3 /drivers/gpu/drm/amd
parent     2de6a7c52a412985446ee358d8e27b7f3de5e3f3 (diff)
drm/amdgpu: clear freed mappings immediately when BO may be freed
Also, add the fence of the clear operations to the BO to ensure that the
underlying memory can only be re-used after all PTEs pointing to it have
been cleared.

This avoids the following sequence of events that could be triggered by
user space:

1. Submit a CS that accesses some BO _without_ adding that BO to the
   buffer list.
2. Free that BO.
3. Some other task re-uses the memory underlying the BO.
4. The CS is submitted to the hardware and accesses memory that is now
   already in use by somebody else.

By clearing the page tables immediately in step 2, a GPU VM fault will be
triggered in step 4 instead of wild memory accesses.

v2: use amdgpu_bo_fence directly

Signed-off-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
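In code terms, the change in the hunk below boils down to the following
condensed sketch (annotated for readability; the surrounding reservation
and backoff handling is omitted, see the full diff for the exact change):

	struct dma_fence *fence = NULL;

	amdgpu_vm_bo_rmv(adev, bo_va);               /* drop the VM mapping */

	/* Clear the now-freed PTEs right away instead of deferring it. */
	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (unlikely(r))
		dev_err(adev->dev,
			"failed to clear page tables on GEM object close (%d)\n", r);

	if (fence) {
		/* Attach the clear fence to the BO so its backing memory
		 * cannot be re-used before the PTE clears have executed.
		 */
		amdgpu_bo_fence(bo, fence, true);
		dma_fence_put(fence);
	}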
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7a37b93..f85520d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -152,6 +152,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
+	struct dma_fence *fence = NULL;
 	int r;
 
 	INIT_LIST_HEAD(&list);
@@ -173,6 +174,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	if (bo_va) {
 		if (--bo_va->ref_count == 0) {
 			amdgpu_vm_bo_rmv(adev, bo_va);
+
+			r = amdgpu_vm_clear_freed(adev, vm, &fence);
+			if (unlikely(r)) {
+				dev_err(adev->dev, "failed to clear page "
+					"tables on GEM object close (%d)\n", r);
+			}
+
+			if (fence) {
+				amdgpu_bo_fence(bo, fence, true);
+				dma_fence_put(fence);
+			}
 		}
 	}
 	ttm_eu_backoff_reservation(&ticket, &list);
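Why attaching the fence is sufficient: amdgpu_bo_fence(bo, fence, true)
adds the fence as a shared fence on the BO's reservation object, so TTM
will not release or re-use the backing memory until the PTE-clear work
has signalled. A minimal sketch of what that helper amounts to in kernels
of this generation (an assumption about code outside this patch, shown
for illustration only):

	/* Sketch only; not part of the patch above. */
	void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
			     bool shared)
	{
		struct reservation_object *resv = bo->tbo.resv;

		if (shared)
			reservation_object_add_shared_fence(resv, fence);
		else
			reservation_object_add_excl_fence(resv, fence);
	}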