author     Chris Wilson <chris@chris-wilson.co.uk>  2016-09-09 14:11:54 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>  2016-09-09 14:23:05 +0100
commit     5590af3e115a9db11c5d6689ddd0d0053be4f4e0 (patch)
tree       8ae9701772751b9b0daf25e2db004ad05c84b4d6 /drivers/gpu/drm/i915/intel_ringbuffer.h
parent     821ed7df6e2a1dbae243caebcfe21a0a4329fca0 (diff)
drm/i915: Drive request submission through fence callbacks
Drive final request submission from a callback from the fence. This way the
request is queued until all dependencies are resolved, at which point it is
handed to the backend for queueing to hardware. At this point, no dependencies
are set on the request, so the callback is immediate.

A side-effect of imposing a heavier-irqsafe spinlock for execlist submission
is that we lose the softirq enabling after scheduling the execlists tasklet.
To compensate, we manually kickstart the softirq by disabling and enabling
the bh around the fence signaling.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: John Harrison <john.c.harrison@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160909131201.16673-14-chris@chris-wilson.co.uk
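The softirq kick described above works because local_bh_enable() runs any
softirqs that became pending while bottom halves were disabled. Below is a
minimal sketch of that pattern; local_bh_disable()/local_bh_enable() are the
real kernel APIs, while the signaling helper and its surrounding function are
hypothetical stand-ins for the driver's own signaling path.

#include <linux/bottom_half.h>	/* local_bh_disable()/local_bh_enable() */

struct drm_i915_gem_request;	/* opaque here; defined by the driver */

/* Hypothetical helper standing in for the driver's fence-signaling code. */
extern void signal_request(struct drm_i915_gem_request *request);

static void signal_and_kick_softirq(struct drm_i915_gem_request *request)
{
	/*
	 * Signaling may schedule the execlists tasklet from under an
	 * irq-safe spinlock, where softirqs cannot run. With bottom
	 * halves disabled the tasklet is merely marked pending, and the
	 * matching local_bh_enable() then runs it immediately on this
	 * CPU instead of leaving it for the next softirq opportunity.
	 */
	local_bh_disable();
	signal_request(request);
	local_bh_enable();	/* kickstarts any tasklet raised above */
}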
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.h')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  8
1 file changed, 8 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 32f5274..7f64d61 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -226,7 +226,15 @@ struct intel_engine_cs {
 #define I915_DISPATCH_PINNED BIT(1)
 #define I915_DISPATCH_RS     BIT(2)
 	int		(*emit_request)(struct drm_i915_gem_request *req);
+
+	/* Pass the request to the hardware queue (e.g. directly into
+	 * the legacy ringbuffer or to the end of an execlist).
+	 *
+	 * This is called from an atomic context with irqs disabled; must
+	 * be irq safe.
+	 */
 	void		(*submit_request)(struct drm_i915_gem_request *req);
+
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
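To make the contract in the new comment concrete, the following is a hedged
sketch of an irq-safe submit_request backend. The engine and request
structures and all names here are hypothetical illustrations, not the
driver's actual types; only spin_lock_irqsave()/spin_unlock_irqrestore() and
the list helpers are real kernel APIs. Since the hook runs in atomic context
with irqs already disabled, it must not sleep and may only take irq-safe
locks.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical stand-ins for the driver's engine and request types. */
struct example_engine {
	spinlock_t submit_lock;		/* protects pending_list; irq safe */
	struct list_head pending_list;	/* requests awaiting hardware */
};

struct example_request {
	struct example_engine *engine;
	struct list_head link;
};

/*
 * Shaped like the submit_request hook above: callable from atomic
 * context with irqs already disabled, so it cannot sleep and must
 * use only irq-safe locking.
 */
static void example_submit_request(struct example_request *rq)
{
	struct example_engine *engine = rq->engine;
	unsigned long flags;

	spin_lock_irqsave(&engine->submit_lock, flags);
	list_add_tail(&rq->link, &engine->pending_list);
	/* A real backend would also poke the hardware or a tasklet here. */
	spin_unlock_irqrestore(&engine->submit_lock, flags);
}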