author		Chris Wilson <chris@chris-wilson.co.uk>	2017-09-15 18:31:00 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2017-09-18 10:59:55 +0100
commit		27a5f61b377bb62e4813af57fd91636f91ea5755
tree		6e17b8a3540ad9a9b2342753d59dd67b5a9c35c0
parent		309bd8ed464fc08f79152e4a18b1da2b11410842
drm/i915: Cancel all ready but queued requests when wedging
When wedging the hw, we want to mark all in-flight requests as -EIO. This is made slightly more complex by execlists, which stores the ready but not yet submitted-to-hw requests on a private queue (an rbtree priolist). Call into execlists to cancel not only the ELSP tracking for the submitted requests, but also the queue of unsubmitted requests.

v2: Move the majority of engine_set_wedged to the backends (both legacy ringbuffer and execlists handling their own lists).

Reported-by: Michał Winiarski <michal.winiarski@intel.com>
Testcase: igt/gem_eio/in-flight-contexts
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170915173100.26470-1-chris@chris-wilson.co.uk
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
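For context, the caller side of this hook is not part of this file's diff: per v2, the bulk of engine_set_wedged() moves into the backends, leaving the core to swap in a nop submission function and invoke the per-engine hook. A minimal sketch of that shape, assuming the i915_gem.c caller and a nop_submit_request() helper that are not shown in this patch:

static void engine_set_wedged(struct intel_engine_cs *engine)
{
	/*
	 * Any future requests are completed with -EIO as they are
	 * submitted (nop_submit_request is assumed here; it is not
	 * part of this diff).
	 */
	engine->submit_request = nop_submit_request;

	/*
	 * Requests already in flight or queued are left to the
	 * backend: legacy ringbuffer and execlists each install
	 * their own cancel_requests() implementation.
	 */
	engine->cancel_requests(engine);
}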
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	20 ++++++++++++++++++++
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 22e5ea8..85e64a4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -782,6 +782,24 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
 	return cs;
 }
 
+static void cancel_requests(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_request *request;
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->timeline->lock, flags);
+
+	/* Mark all submitted requests as skipped. */
+	list_for_each_entry(request, &engine->timeline->requests, link) {
+		GEM_BUG_ON(!request->global_seqno);
+		if (!i915_gem_request_completed(request))
+			dma_fence_set_error(&request->fence, -EIO);
+	}
+	/* Remaining _unready_ requests will be nop'ed when submitted */
+
+	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
 static void i9xx_submit_request(struct drm_i915_gem_request *request)
 {
 	struct drm_i915_private *dev_priv = request->i915;
@@ -1996,11 +2014,13 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = i9xx_submit_request;
+	engine->cancel_requests = cancel_requests;
 }
 
 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = gen6_bsd_submit_request;
+	engine->cancel_requests = cancel_requests;
 }
 
 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
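The execlists counterpart named in the commit message lives in intel_lrc.c, not in this file, and has strictly more to do: ready but unsubmitted requests sit on the engine's private rbtree priolist rather than on the timeline. A rough sketch of that flow, assuming helper names (port_request(), __i915_gem_request_submit(), the execlist_first rbtree) that match the i915 code of this era but are not part of this diff:

static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;
	unsigned int n;

	spin_lock_irqsave(&engine->timeline->lock, flags);

	/* Cancel the requests on the HW and clear the ELSP tracker. */
	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
		i915_gem_request_put(port_request(&engine->execlist_port[n]));
	memset(engine->execlist_port, 0, sizeof(engine->execlist_port));

	/* Mark all executing requests as skipped, as in cancel_requests(). */
	list_for_each_entry(rq, &engine->timeline->requests, link)
		if (!i915_gem_request_completed(rq))
			dma_fence_set_error(&rq->fence, -EIO);

	/*
	 * Flush the queue of ready but unsubmitted requests: fail each
	 * with -EIO and move it onto the timeline so it can be retired
	 * through the normal path.
	 */
	for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);

		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
			INIT_LIST_HEAD(&rq->priotree.link);
			dma_fence_set_error(&rq->fence, -EIO);
			__i915_gem_request_submit(rq);
		}
	}

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

Either way, every stuck request ends up carrying -EIO before it is retired, so waiters are woken with an error rather than left hanging on a wedged engine.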