From 1aa4051b7f5474cca6009c13868c59d78d06f983 Mon Sep 17 00:00:00 2001
From: Junwei Zhang
Date: Wed, 19 Aug 2015 16:24:19 +0800
Subject: drm/amdgpu: modify amdgpu_fence_wait_any() to
 amdgpu_fence_wait_multiple()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename the function and update the related code with this modified function.

Add the new parameter of bool wait_all.
If wait_all is true, it will return when all fences are signaled or timeout.
If wait_all is false, it will return when any fence is signaled or timeout.

Signed-off-by: Junwei Zhang
Reviewed-by: Monk Liu
Reviewed-by: Jammy Zhou
Reviewed-by: Christian König
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  9 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 79 ++++++++++++++++++++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c    |  3 +-
 3 files changed, 69 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d050f50..4addac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -440,9 +440,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-				  struct amdgpu_fence **fences,
-				  bool intr, long t);
+signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
+				       struct amdgpu_fence **array,
+				       uint32_t count,
+				       bool wait_all,
+				       bool intr,
+				       signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 98500f1..ae014fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -836,13 +836,12 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
 	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
-static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
+static bool amdgpu_test_signaled_any(struct amdgpu_fence **fences, uint32_t count)
 {
 	int idx;
 	struct amdgpu_fence *fence;
 
-	idx = 0;
-	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+	for (idx = 0; idx < count; ++idx) {
 		fence = fences[idx];
 		if (fence) {
 			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
@@ -852,6 +851,22 @@ static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
 	return false;
 }
 
+static bool amdgpu_test_signaled_all(struct amdgpu_fence **fences, uint32_t count)
+{
+	int idx;
+	struct amdgpu_fence *fence;
+
+	for (idx = 0; idx < count; ++idx) {
+		fence = fences[idx];
+		if (fence) {
+			if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+				return false;
+		}
+	}
+
+	return true;
+}
+
 struct amdgpu_wait_cb {
 	struct fence_cb base;
 	struct task_struct *task;
@@ -867,33 +882,56 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
 static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
 					     signed long t)
 {
-	struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_device *adev = fence->ring->adev;
 
-	memset(&array[0], 0, sizeof(array));
-	array[0] = fence;
-
-	return amdgpu_fence_wait_any(adev, array, intr, t);
+	return amdgpu_fence_wait_multiple(adev, &fence, 1, false, intr, t);
 }
 
-/* wait until any fence in array signaled */
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-		struct amdgpu_fence **array, bool intr, signed long t)
+/**
+ * Wait the fence array with timeout
+ *
+ * @adev: amdgpu device
+ * @array: the fence array with amdgpu fence pointer
+ * @count: the number of the fence array
+ * @wait_all: the flag of wait all(true) or wait any(false)
+ * @intr: when sleep, set the current task interruptable or not
+ * @t: timeout to wait
+ *
+ * If wait_all is true, it will return when all fences are signaled or timeout.
+ * If wait_all is false, it will return when any fence is signaled or timeout.
+ */
+signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
+				       struct amdgpu_fence **array,
+				       uint32_t count,
+				       bool wait_all,
+				       bool intr,
+				       signed long t)
 {
 	long idx = 0;
-	struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
+	struct amdgpu_wait_cb *cb;
 	struct amdgpu_fence *fence;
 
 	BUG_ON(!array);
 
-	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
+	if (cb == NULL) {
+		t = -ENOMEM;
+		goto err_free_cb;
+	}
+
+	for (idx = 0; idx < count; ++idx) {
 		fence = array[idx];
 		if (fence) {
 			cb[idx].task = current;
 			if (fence_add_callback(&fence->base,
-					&cb[idx].base, amdgpu_fence_wait_cb))
-				return t; /* return if fence is already signaled */
+					&cb[idx].base, amdgpu_fence_wait_cb)) {
+				/* The fence is already signaled */
+				if (wait_all)
+					continue;
+				else
+					goto fence_rm_cb;
+			}
 		}
 	}
 
@@ -907,7 +945,9 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
 		 * amdgpu_test_signaled_any must be called after
 		 * set_current_state to prevent a race with wake_up_process
 		 */
-		if (amdgpu_test_signaled_any(array))
+		if (!wait_all && amdgpu_test_signaled_any(array, count))
+			break;
+		if (wait_all && amdgpu_test_signaled_all(array, count))
 			break;
 
 		if (adev->needs_reset) {
@@ -923,13 +963,16 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
 
 	__set_current_state(TASK_RUNNING);
 
-	idx = 0;
-	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+fence_rm_cb:
+	for (idx = 0; idx < count; ++idx) {
 		fence = array[idx];
 		if (fence)
 			fence_remove_callback(&fence->base, &cb[idx].base);
 	}
 
+err_free_cb:
+	kfree(cb);
+
 	return t;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index d6398cf..4597899 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -352,7 +352,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
 		spin_unlock(&sa_manager->wq.lock);
-		t = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT);
+		t = amdgpu_fence_wait_multiple(adev, fences, AMDGPU_MAX_RINGS, false, false,
+					       MAX_SCHEDULE_TIMEOUT);
 		r = (t > 0) ? 0 : t;
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for block */
--
cgit v1.1
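
Usage sketch (illustrative only, not taken from the patch): the hypothetical helper
below shows how a caller might drive both modes of the renamed function, assuming
it already holds an array of fence pointers with unused slots set to NULL, as
amdgpu_sa_bo_new() does. The helper name and its error handling are assumptions
made for illustration.

/*
 * Hypothetical caller sketch -- not part of the commit.  Assumes a
 * caller-populated fence array (unused slots NULL), as in amdgpu_sa_bo_new().
 */
static int example_wait_fences(struct amdgpu_device *adev,
			       struct amdgpu_fence **fences,
			       uint32_t count)
{
	signed long t;

	/* wait_all = false: like the old amdgpu_fence_wait_any(), returns as
	 * soon as any fence in the array signals. */
	t = amdgpu_fence_wait_multiple(adev, fences, count, false, false,
				       MAX_SCHEDULE_TIMEOUT);
	if (t <= 0)
		return t;	/* 0 = timed out, negative = error (e.g. -ENOMEM) */

	/* wait_all = true: returns only once every non-NULL fence has signaled. */
	t = amdgpu_fence_wait_multiple(adev, fences, count, true, false,
				       MAX_SCHEDULE_TIMEOUT);

	return (t > 0) ? 0 : t;
}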