Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_fence.c')
 -rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 621
 1 file changed, 371 insertions(+), 250 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4bd36a3..11f5f40 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,98 +63,82 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
-	unsigned long irq_flags;
-
-	write_lock_irqsave(&rdev->fence_lock, irq_flags);
-	if (fence->emitted) {
-		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+	/* we are protected by the ring emission mutex */
+	if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
 		return 0;
 	}
-	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
-	if (!rdev->ring[fence->ring].ready)
-		/* FIXME: cp is not running assume everythings is done right
-		 * away
-		 */
-		radeon_fence_write(rdev, fence->seq, fence->ring);
-	else
-		radeon_fence_ring_emit(rdev, fence->ring, fence);
-
+	fence->seq = ++rdev->fence_drv[fence->ring].seq;
+	radeon_fence_ring_emit(rdev, fence->ring, fence);
 	trace_radeon_fence_emit(rdev->ddev, fence->seq);
-	fence->emitted = true;
-	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
-	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	return 0;
 }
 
-static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
 {
-	struct radeon_fence *fence;
-	struct list_head *i, *n;
-	uint32_t seq;
+	uint64_t seq, last_seq;
+	unsigned count_loop = 0;
 	bool wake = false;
-	unsigned long cjiffies;
 
-	seq = radeon_fence_read(rdev, ring);
-	if (seq != rdev->fence_drv[ring].last_seq) {
-		rdev->fence_drv[ring].last_seq = seq;
-		rdev->fence_drv[ring].last_jiffies = jiffies;
-		rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-	} else {
-		cjiffies = jiffies;
-		if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
-			cjiffies -= rdev->fence_drv[ring].last_jiffies;
-			if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
-				/* update the timeout */
-				rdev->fence_drv[ring].last_timeout -= cjiffies;
-			} else {
-				/* the 500ms timeout is elapsed we should test
-				 * for GPU lockup
-				 */
-				rdev->fence_drv[ring].last_timeout = 1;
-			}
-		} else {
-			/* wrap around update last jiffies, we will just wait
-			 * a little longer
-			 */
-			rdev->fence_drv[ring].last_jiffies = cjiffies;
+	/* Note there is a scenario here for an infinite loop but it's
+	 * very unlikely to happen. For it to happen, the current polling
+	 * process need to be interrupted by another process and another
+	 * process needs to update the last_seq btw the atomic read and
+	 * xchg of the current process.
+	 *
+	 * More over for this to go in infinite loop there need to be
+	 * continuously new fence signaled ie radeon_fence_read needs
+	 * to return a different value each time for both the currently
+	 * polling process and the other process that xchg the last_seq
+	 * btw atomic read and xchg of the current process. And the
+	 * value the other process set as last seq must be higher than
+	 * the seq value we just read. Which means that current process
+	 * need to be interrupted after radeon_fence_read and before
+	 * atomic xchg.
+	 *
+	 * To be even more safe we count the number of time we loop and
+	 * we bail after 10 loop just accepting the fact that we might
+	 * have temporarly set the last_seq not to the true real last
+	 * seq but to an older one.
+	 */
+	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+	do {
+		seq = radeon_fence_read(rdev, ring);
+		seq |= last_seq & 0xffffffff00000000LL;
+		if (seq < last_seq) {
+			seq += 0x100000000LL;
 		}
-		return false;
-	}
-	n = NULL;
-	list_for_each(i, &rdev->fence_drv[ring].emitted) {
-		fence = list_entry(i, struct radeon_fence, list);
-		if (fence->seq == seq) {
-			n = i;
+
+		if (seq == last_seq) {
 			break;
 		}
-	}
-	/* all fence previous to this one are considered as signaled */
-	if (n) {
-		i = n;
-		do {
-			n = i->prev;
-			list_move_tail(i, &rdev->fence_drv[ring].signaled);
-			fence = list_entry(i, struct radeon_fence, list);
-			fence->signaled = true;
-			i = n;
-		} while (i != &rdev->fence_drv[ring].emitted);
+		/* If we loop over we don't want to return without
+		 * checking if a fence is signaled as it means that the
+		 * seq we just read is different from the previous on.
+		 */
 		wake = true;
+		last_seq = seq;
+		if ((count_loop++) > 10) {
+			/* We looped over too many time leave with the
+			 * fact that we might have set an older fence
+			 * seq then the current real last seq as signaled
+			 * by the hw.
+			 */
+			break;
+		}
+	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
+
+	if (wake) {
+		rdev->fence_drv[ring].last_activity = jiffies;
+		wake_up_all(&rdev->fence_queue);
 	}
-	return wake;
 }
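The new radeon_fence_process above is the core of the rework: the hardware still reports a 32-bit sequence number, and the driver grafts it onto a monotonic 64-bit software sequence. A standalone restatement of that extension step (illustrative sketch only; the names are placeholders, and it assumes the hardware counter advances by less than 2^32 between two polls):

#include <stdint.h>

/* Sketch: extend a 32-bit hardware sequence to the 64-bit software sequence. */
static uint64_t extend_seq(uint32_t hw_seq, uint64_t last_seq)
{
	/* keep the upper 32 bits of the last known 64-bit sequence */
	uint64_t seq = (uint64_t)hw_seq | (last_seq & 0xffffffff00000000ULL);

	/* lower half went backwards => the hardware counter wrapped */
	if (seq < last_seq)
		seq += 0x100000000ULL;
	return seq;
}

The atomic64_xchg loop then publishes the largest sequence any concurrent poller has computed, which is what lets the patch drop rdev->fence_lock around this path.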
 
 static void radeon_fence_destroy(struct kref *kref)
 {
-	unsigned long irq_flags;
-	struct radeon_fence *fence;
+	struct radeon_fence *fence;
 
 	fence = container_of(kref, struct radeon_fence, kref);
-	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
-	list_del(&fence->list);
-	fence->emitted = false;
-	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
-	if (fence->semaphore)
-		radeon_semaphore_free(fence->rdev, fence->semaphore);
+	fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
 	kfree(fence);
 }
 
@@ -162,171 +146,342 @@ int radeon_fence_create(struct radeon_device *rdev,
 			struct radeon_fence **fence,
 			int ring)
 {
-	unsigned long irq_flags;
-
 	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
 	if ((*fence) == NULL) {
 		return -ENOMEM;
 	}
 	kref_init(&((*fence)->kref));
 	(*fence)->rdev = rdev;
-	(*fence)->emitted = false;
-	(*fence)->signaled = false;
-	(*fence)->seq = 0;
+	(*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
 	(*fence)->ring = ring;
-	(*fence)->semaphore = NULL;
-	INIT_LIST_HEAD(&(*fence)->list);
-
-	write_lock_irqsave(&rdev->fence_lock, irq_flags);
-	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
-	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	return 0;
 }
 
-bool radeon_fence_signaled(struct radeon_fence *fence)
+static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
+				      u64 seq, unsigned ring)
 {
-	unsigned long irq_flags;
-	bool signaled = false;
-
-	if (!fence)
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 		return true;
-
-	if (fence->rdev->gpu_lockup)
+	}
+	/* poll new last sequence at least once */
+	radeon_fence_process(rdev, ring);
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 		return true;
+	}
+	return false;
+}
 
-	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
-	signaled = fence->signaled;
-	/* if we are shuting down report all fence as signaled */
-	if (fence->rdev->shutdown) {
-		signaled = true;
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+	if (!fence) {
+		return true;
 	}
-	if (!fence->emitted) {
+	if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
 		WARN(1, "Querying an unemitted fence : %p !\n", fence);
-		signaled = true;
+		return true;
 	}
-	if (!signaled) {
-		radeon_fence_poll_locked(fence->rdev, fence->ring);
-		signaled = fence->signaled;
+	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
+		return true;
+	}
+	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+		return true;
 	}
-	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
-	return signaled;
+	return false;
+}
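RADEON_FENCE_SIGNALED_SEQ and RADEON_FENCE_NOTEMITED_SEQ come from radeon.h, which this diff does not touch. Judging from the comparisons above (an emitted fence satisfies fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ), the sentinels presumably sit at the extremes of the sequence range, along these lines (assumed values, inferred for reading the patch, not shown in it):

/* inferred definitions, for reading this patch */
#define RADEON_FENCE_SIGNALED_SEQ	0LL	/* fence known to have signaled */
#define RADEON_FENCE_NOTEMITED_SEQ	(~0LL)	/* created but not yet emitted */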
+
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
+				 unsigned ring, bool intr, bool lock_ring)
+{
+	unsigned long timeout, last_activity;
+	uint64_t seq;
+	unsigned i;
+	bool signaled;
+	int r;
+
+	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+		if (!rdev->ring[ring].ready) {
+			return -EBUSY;
+		}
+
+		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
+			/* the normal case, timeout is somewhere before last_activity */
+			timeout = rdev->fence_drv[ring].last_activity - timeout;
+		} else {
+			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
+			 * anyway we will just wait for the minimum amount and then check for a lockup
+			 */
+			timeout = 1;
+		}
+		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+		/* Save current last activity valuee, used to check for GPU lockups */
+		last_activity = rdev->fence_drv[ring].last_activity;
+
+		trace_radeon_fence_wait_begin(rdev->ddev, seq);
+		radeon_irq_kms_sw_irq_get(rdev, ring);
+		if (intr) {
+			r = wait_event_interruptible_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+				timeout);
+		} else {
+			r = wait_event_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+				timeout);
+		}
+		radeon_irq_kms_sw_irq_put(rdev, ring);
+		if (unlikely(r < 0)) {
+			return r;
+		}
+		trace_radeon_fence_wait_end(rdev->ddev, seq);
+
+		if (unlikely(!signaled)) {
+			/* we were interrupted for some reason and fence
+			 * isn't signaled yet, resume waiting */
+			if (r) {
+				continue;
+			}
+
+			/* check if sequence value has changed since last_activity */
+			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+				continue;
+			}
+
+			if (lock_ring) {
+				mutex_lock(&rdev->ring_lock);
+			}
+
+			/* test if somebody else has already decided that this is a lockup */
+			if (last_activity != rdev->fence_drv[ring].last_activity) {
+				if (lock_ring) {
+					mutex_unlock(&rdev->ring_lock);
+				}
+				continue;
+			}
+
+			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+				/* good news we believe it's a lockup */
+				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
+					 target_seq, seq);
+
+				/* change last activity so nobody else think there is a lockup */
+				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+					rdev->fence_drv[i].last_activity = jiffies;
+				}
+
+				/* mark the ring as not ready any more */
+				rdev->ring[ring].ready = false;
+				if (lock_ring) {
+					mutex_unlock(&rdev->ring_lock);
+				}
+				return -EDEADLK;
+			}
+
+			if (lock_ring) {
+				mutex_unlock(&rdev->ring_lock);
+			}
+		}
+	}
+	return 0;
 }
 
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
-	struct radeon_device *rdev;
-	unsigned long irq_flags, timeout;
-	u32 seq;
 	int r;
 
 	if (fence == NULL) {
 		WARN(1, "Querying an invalid fence : %p !\n", fence);
-		return 0;
+		return -EINVAL;
 	}
-	rdev = fence->rdev;
-	if (radeon_fence_signaled(fence)) {
-		return 0;
+
+	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+				  fence->ring, intr, true);
+	if (r) {
+		return r;
 	}
-	timeout = rdev->fence_drv[fence->ring].last_timeout;
-retry:
-	/* save current sequence used to check for GPU lockup */
-	seq = rdev->fence_drv[fence->ring].last_seq;
-	trace_radeon_fence_wait_begin(rdev->ddev, seq);
-	if (intr) {
-		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
-		r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
-				radeon_fence_signaled(fence), timeout);
-		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
-		if (unlikely(r < 0)) {
-			return r;
+	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+	return 0;
+}
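radeon_fence_wait_seq never sleeps past the lockup-check window: each wait is capped so that at most ~500ms elapse after the ring's last recorded activity before radeon_ring_is_lockup() runs. The timeout arithmetic, restated as a standalone helper (a sketch, not part of the patch; FENCE_WINDOW stands in for RADEON_FENCE_JIFFIES_TIMEOUT):

#include <linux/jiffies.h>

#define FENCE_WINDOW	(HZ / 2)	/* stand-in for RADEON_FENCE_JIFFIES_TIMEOUT */

/* How long may the next sleep last, given when the ring last made progress? */
static unsigned long fence_wait_timeout(unsigned long last_activity)
{
	unsigned long window_start = jiffies - FENCE_WINDOW;

	if (time_after(last_activity, window_start)) {
		/* part of the window remains: sleep only for the rest of it */
		return last_activity - window_start;
	}
	/* window already elapsed, or jiffies wrapped: minimal sleep,
	 * then the caller runs the lockup check */
	return 1;
}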
+
+bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
+			return true;
 		}
-	} else {
-		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
-		r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
-			radeon_fence_signaled(fence), timeout);
-		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
 	}
-	trace_radeon_fence_wait_end(rdev->ddev, seq);
-	if (unlikely(!radeon_fence_signaled(fence))) {
-		/* we were interrupted for some reason and fence isn't
-		 * isn't signaled yet, resume wait
-		 */
-		if (r) {
-			timeout = r;
-			goto retry;
+	return false;
+}
+
+static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
+				     u64 *target_seq, bool intr)
+{
+	unsigned long timeout, last_activity, tmp;
+	unsigned i, ring = RADEON_NUM_RINGS;
+	bool signaled;
+	int r;
+
+	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!target_seq[i]) {
+			continue;
+		}
+
+		/* use the most recent one as indicator */
+		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+			last_activity = rdev->fence_drv[i].last_activity;
 		}
-		/* don't protect read access to rdev->fence_drv[t].last_seq
-		 * if we experiencing a lockup the value doesn't change
+
+		/* For lockup detection just pick the lowest ring we are
+		 * actively waiting for
 		 */
-		if (seq == rdev->fence_drv[fence->ring].last_seq &&
-		    radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
-			/* good news we believe it's a lockup */
-			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
-			       fence->seq, seq);
-			/* FIXME: what should we do ? marking everyone
-			 * as signaled for now
+		if (i < ring) {
+			ring = i;
+		}
+	}
+
+	/* nothing to wait for ? */
+	if (ring == RADEON_NUM_RINGS) {
+		return 0;
+	}
+
+	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+		if (time_after(last_activity, timeout)) {
+			/* the normal case, timeout is somewhere before last_activity */
+			timeout = last_activity - timeout;
+		} else {
+			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
+			 * anyway we will just wait for the minimum amount and then check for a lockup
 			 */
-			rdev->gpu_lockup = true;
-			r = radeon_gpu_reset(rdev);
-			if (r)
-				return r;
-			radeon_fence_write(rdev, fence->seq, fence->ring);
-			rdev->gpu_lockup = false;
+			timeout = 1;
+		}
+
+		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (target_seq[i]) {
+				radeon_irq_kms_sw_irq_get(rdev, i);
+			}
+		}
+		if (intr) {
+			r = wait_event_interruptible_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+				timeout);
+		} else {
+			r = wait_event_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+				timeout);
+		}
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (target_seq[i]) {
+				radeon_irq_kms_sw_irq_put(rdev, i);
+			}
+		}
+		if (unlikely(r < 0)) {
+			return r;
+		}
+		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
+
+		if (unlikely(!signaled)) {
+			/* we were interrupted for some reason and fence
+			 * isn't signaled yet, resume waiting */
+			if (r) {
+				continue;
+			}
+
+			mutex_lock(&rdev->ring_lock);
+			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+					tmp = rdev->fence_drv[i].last_activity;
+				}
+			}
+			/* test if somebody else has already decided that this is a lockup */
+			if (last_activity != tmp) {
+				last_activity = tmp;
+				mutex_unlock(&rdev->ring_lock);
+				continue;
+			}
+
+			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+				/* good news we believe it's a lockup */
+				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
+					 target_seq[ring]);
+
+				/* change last activity so nobody else think there is a lockup */
+				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+					rdev->fence_drv[i].last_activity = jiffies;
+				}
+
+				/* mark the ring as not ready any more */
+				rdev->ring[ring].ready = false;
+				mutex_unlock(&rdev->ring_lock);
+				return -EDEADLK;
+			}
+			mutex_unlock(&rdev->ring_lock);
 		}
-		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-		write_lock_irqsave(&rdev->fence_lock, irq_flags);
-		rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-		rdev->fence_drv[fence->ring].last_jiffies = jiffies;
-		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
-		goto retry;
 	}
 	return 0;
 }
 
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_any(struct radeon_device *rdev,
+			  struct radeon_fence **fences,
+			  bool intr)
 {
-	unsigned long irq_flags;
-	struct radeon_fence *fence;
+	uint64_t seq[RADEON_NUM_RINGS];
+	unsigned i;
 	int r;
 
-	if (rdev->gpu_lockup) {
-		return 0;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		seq[i] = 0;
+
+		if (!fences[i]) {
+			continue;
+		}
+
+		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
+			/* something was allready signaled */
+			return 0;
+		}
+
+		if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+			seq[i] = fences[i]->seq;
+		}
 	}
-	write_lock_irqsave(&rdev->fence_lock, irq_flags);
-	if (list_empty(&rdev->fence_drv[ring].emitted)) {
-		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
-		return 0;
+
+	r = radeon_fence_wait_any_seq(rdev, seq, intr);
+	if (r) {
+		return r;
 	}
-	fence = list_entry(rdev->fence_drv[ring].emitted.next,
-			   struct radeon_fence, list);
-	radeon_fence_ref(fence);
-	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
-	r = radeon_fence_wait(fence, false);
-	radeon_fence_unref(&fence);
-	return r;
+	return 0;
 }
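radeon_fence_wait_any takes one slot per ring, so a caller waiting for whichever of two fences signals first might look like this (hypothetical usage; gfx_fence and compute_fence are assumed to be emitted fences, error handling omitted):

struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL };
int r;

fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
fences[CAYMAN_RING_TYPE_CP1_INDEX] = compute_fence;

/* returns 0 once either fence signals, -EDEADLK on a GPU lockup */
r = radeon_fence_wait_any(rdev, fences, true);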
 
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 {
-	unsigned long irq_flags;
-	struct radeon_fence *fence;
-	int r;
-
-	if (rdev->gpu_lockup) {
-		return 0;
+	uint64_t seq;
+
+	/* We are not protected by ring lock when reading current seq but
+	 * it's ok as worst case is we return to early while we could have
+	 * wait.
+	 */
+	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+	if (seq >= rdev->fence_drv[ring].seq) {
+		/* nothing to wait for, last_seq is
+		   already the last emited fence */
+		return -ENOENT;
 	}
-	write_lock_irqsave(&rdev->fence_lock, irq_flags);
-	if (list_empty(&rdev->fence_drv[ring].emitted)) {
-		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
-		return 0;
-	}
-	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
-			   struct radeon_fence, list);
-	radeon_fence_ref(fence);
-	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
-	r = radeon_fence_wait(fence, false);
-	radeon_fence_unref(&fence);
-	return r;
+	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+}
+
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+{
+	/* We are not protected by ring lock when reading current seq
+	 * but it's ok as wait empty is call from place where no more
+	 * activity can be scheduled so there won't be concurrent access
+	 * to seq value.
+	 */
+	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
+				     ring, false, false);
 }
 
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
@@ -345,49 +500,27 @@ void radeon_fence_unref(struct radeon_fence **fence)
 	}
 }
 
-void radeon_fence_process(struct radeon_device *rdev, int ring)
-{
-	unsigned long irq_flags;
-	bool wake;
-
-	write_lock_irqsave(&rdev->fence_lock, irq_flags);
-	wake = radeon_fence_poll_locked(rdev, ring);
-	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
-	if (wake) {
-		wake_up_all(&rdev->fence_drv[ring].queue);
-	}
-}
-
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
 {
-	unsigned long irq_flags;
-	int not_processed = 0;
-
-	read_lock_irqsave(&rdev->fence_lock, irq_flags);
-	if (!rdev->fence_drv[ring].initialized) {
-		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
-		return 0;
+	uint64_t emitted;
+
+	/* We are not protected by ring lock when reading the last sequence
+	 * but it's ok to report slightly wrong fence count here.
+	 */
+	radeon_fence_process(rdev, ring);
+	emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
+	/* to avoid 32bits warp around */
+	if (emitted > 0x10000000) {
+		emitted = 0x10000000;
 	}
-
-	if (!list_empty(&rdev->fence_drv[ring].emitted)) {
-		struct list_head *ptr;
-		list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
-			/* count up to 3, that's enought info */
-			if (++not_processed >= 3)
-				break;
-		}
-	}
-	read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
-	return not_processed;
+	return (unsigned)emitted;
 }
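With 64-bit sequences, the outstanding-fence count becomes a plain subtraction between the last emitted and the last signaled sequence, replacing the old "count up to 3" list walk; the 0x10000000 clamp merely bounds the result if the two counters are momentarily inconsistent. As a standalone expression (sketch):

#include <stdint.h>

/* The arithmetic behind radeon_fence_count_emitted. */
static unsigned count_emitted(uint64_t emitted_seq, uint64_t signaled_seq)
{
	uint64_t n = emitted_seq - signaled_seq;

	return n > 0x10000000 ? 0x10000000u : (unsigned)n;
}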
 
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 {
-	unsigned long irq_flags;
 	uint64_t index;
 	int r;
 
-	write_lock_irqsave(&rdev->fence_lock, irq_flags);
 	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 	if (rdev->wb.use_event) {
 		rdev->fence_drv[ring].scratch_reg = 0;
@@ -396,7 +529,6 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
 		if (r) {
 			dev_err(rdev->dev, "fence failed to get scratch register\n");
-			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 			return r;
 		}
 		index = RADEON_WB_SCRATCH_OFFSET +
@@ -405,11 +537,10 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 	}
 	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
 	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
-	radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+	radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
 	rdev->fence_drv[ring].initialized = true;
-	DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
+	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
 		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
-	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	return 0;
 }
 
@@ -418,24 +549,20 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
 	rdev->fence_drv[ring].scratch_reg = -1;
 	rdev->fence_drv[ring].cpu_addr = NULL;
 	rdev->fence_drv[ring].gpu_addr = 0;
-	atomic_set(&rdev->fence_drv[ring].seq, 0);
-	INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
-	INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
-	INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
-	init_waitqueue_head(&rdev->fence_drv[ring].queue);
+	rdev->fence_drv[ring].seq = 0;
+	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+	rdev->fence_drv[ring].last_activity = jiffies;
 	rdev->fence_drv[ring].initialized = false;
 }
 
 int radeon_fence_driver_init(struct radeon_device *rdev)
 {
-	unsigned long irq_flags;
 	int ring;
 
-	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	init_waitqueue_head(&rdev->fence_queue);
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		radeon_fence_driver_init_ring(rdev, ring);
 	}
-	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	if (radeon_debugfs_fence_init(rdev)) {
 		dev_err(rdev->dev, "fence debugfs file creation failed\n");
 	}
@@ -444,19 +571,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 
 void radeon_fence_driver_fini(struct radeon_device *rdev)
 {
-	unsigned long irq_flags;
 	int ring;
 
+	mutex_lock(&rdev->ring_lock);
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		if (!rdev->fence_drv[ring].initialized)
 			continue;
-		radeon_fence_wait_last(rdev, ring);
-		wake_up_all(&rdev->fence_drv[ring].queue);
-		write_lock_irqsave(&rdev->fence_lock, irq_flags);
+		radeon_fence_wait_empty_locked(rdev, ring);
+		wake_up_all(&rdev->fence_queue);
 		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
-		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		rdev->fence_drv[ring].initialized = false;
 	}
+	mutex_unlock(&rdev->ring_lock);
 }
 
@@ -469,7 +595,6 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_fence *fence;
 	int i;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -477,14 +602,10 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 			continue;
 
 		seq_printf(m, "--- ring %d ---\n", i);
-		seq_printf(m, "Last signaled fence 0x%08X\n",
-			   radeon_fence_read(rdev, i));
-		if (!list_empty(&rdev->fence_drv[i].emitted)) {
-			fence = list_entry(rdev->fence_drv[i].emitted.prev,
-					   struct radeon_fence, list);
-			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
-				   fence, fence->seq);
-		}
+		seq_printf(m, "Last signaled fence 0x%016llx\n",
+			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
+		seq_printf(m, "Last emitted 0x%016llx\n",
+			   rdev->fence_drv[i].seq);
 	}
 	return 0;
 }
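Taken together, the rework keeps the familiar create/emit/wait/unref lifecycle and only replaces the machinery underneath: fence_lock plus per-fence lists become one 64-bit counter pair per ring and a single fence_queue. A hedged sketch of a typical caller, with error handling trimmed; the ring helpers are assumed from the rest of the driver, and emission must happen under what the patch calls the "ring emission mutex":

struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_fence *fence;
int r;

r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
r = radeon_ring_lock(rdev, ring, 64);
/* ... write command packets ... */
r = radeon_fence_emit(rdev, fence);	/* assigns fence->seq and emits it */
radeon_ring_unlock_commit(rdev, ring);

r = radeon_fence_wait(fence, false);	/* -EDEADLK here means GPU lockup */
radeon_fence_unref(&fence);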