Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 79
1 file changed, 50 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index bada32b..4f7057d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -69,8 +69,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
workload->ctx_desc.lrca);
- context_page_num = intel_lr_context_size(
- gvt->dev_priv->engine[ring_id]);
+ context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
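Both populate_shadow_context() and update_guest_context() now take the context size straight from the engine (engine->context_size) instead of calling the removed intel_lr_context_size() helper, then convert that byte count to a page count with a right shift. A minimal sketch of that bytes-to-pages conversion, assuming 4 KiB pages; the constants and the sample size below are illustrative, not values from the driver:

#include <stdio.h>

#define PAGE_SHIFT 12                    /* 4 KiB pages, as on x86 */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	/* Hypothetical context size; the real value comes from
	 * engine->context_size and is already page aligned.
	 */
	unsigned long context_size = 22 * PAGE_SIZE;
	unsigned long context_page_num = context_size >> PAGE_SHIFT;

	printf("context size %lu bytes -> %lu pages\n",
	       context_size, context_page_num);
	return 0;
}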
@@ -139,30 +138,42 @@ static int shadow_context_status_change(struct notifier_block *nb,
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
shadow_ctx_notifier_block[req->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- struct intel_vgpu_workload *workload =
- scheduler->current_workload[req->engine->id];
+ enum intel_engine_id ring_id = req->engine->id;
+ struct intel_vgpu_workload *workload;
+
+ if (!is_gvt_request(req)) {
+ spin_lock_bh(&scheduler->mmio_context_lock);
+ if (action == INTEL_CONTEXT_SCHEDULE_IN &&
+ scheduler->engine_owner[ring_id]) {
+ /* Switch ring from vGPU to host. */
+ intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
+ NULL, ring_id);
+ scheduler->engine_owner[ring_id] = NULL;
+ }
+ spin_unlock_bh(&scheduler->mmio_context_lock);
- if (!is_gvt_request(req) || unlikely(!workload))
+ return NOTIFY_OK;
+ }
+
+ workload = scheduler->current_workload[ring_id];
+ if (unlikely(!workload))
return NOTIFY_OK;
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
- intel_gvt_load_render_mmio(workload->vgpu,
- workload->ring_id);
+ spin_lock_bh(&scheduler->mmio_context_lock);
+ if (workload->vgpu != scheduler->engine_owner[ring_id]) {
+ /* Switch ring from host to vGPU or vGPU to vGPU. */
+ intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
+ workload->vgpu, ring_id);
+ scheduler->engine_owner[ring_id] = workload->vgpu;
+ } else
+ gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
+ ring_id, workload->vgpu->id);
+ spin_unlock_bh(&scheduler->mmio_context_lock);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- intel_gvt_restore_render_mmio(workload->vgpu,
- workload->ring_id);
- /* If the status is -EINPROGRESS means this workload
- * doesn't meet any issue during dispatching so when
- * get the SCHEDULE_OUT set the status to be zero for
- * good. If the status is NOT -EINPROGRESS means there
- * is something wrong happened during dispatching and
- * the status should not be set to zero
- */
- if (workload->status == -EINPROGRESS)
- workload->status = 0;
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
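The notifier no longer saves and restores render MMIO on every schedule-in/out. Instead the scheduler remembers which vGPU currently owns each engine (scheduler->engine_owner[ring_id], protected by mmio_context_lock) and calls intel_gvt_switch_mmio() only when ownership actually changes, including switching back to the host (owner NULL) when a non-GVT request runs. A simplified, self-contained sketch of that owner-tracking idea for a single engine, using a pthread mutex in place of the spinlock; the vgpu type and switch_mmio() stub are hypothetical stand-ins, not the driver's API:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct vgpu { int id; };                 /* stand-in for struct intel_vgpu */

static pthread_mutex_t mmio_context_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vgpu *engine_owner;        /* NULL means the host owns the engine */

/* Stand-in for intel_gvt_switch_mmio(prev, next, ring_id). */
static void switch_mmio(struct vgpu *prev, struct vgpu *next)
{
	printf("mmio switch: %d -> %d\n",
	       prev ? prev->id : -1, next ? next->id : -1);
}

/* Called when a context is scheduled in; next == NULL means a host request. */
static void schedule_in(struct vgpu *next)
{
	pthread_mutex_lock(&mmio_context_lock);
	if (engine_owner != next) {
		switch_mmio(engine_owner, next);  /* host<->vGPU or vGPU<->vGPU */
		engine_owner = next;
	}       /* else: same owner, skip the MMIO save/restore entirely */
	pthread_mutex_unlock(&mmio_context_lock);
}

int main(void)
{
	struct vgpu v1 = { 1 }, v2 = { 2 };

	schedule_in(&v1);   /* host -> vGPU1: switch */
	schedule_in(&v1);   /* same owner: skipped */
	schedule_in(&v2);   /* vGPU1 -> vGPU2: switch */
	schedule_in(NULL);  /* vGPU2 -> host: switch */
	return 0;
}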
@@ -181,6 +192,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_ring *ring;
int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -199,8 +211,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
* shadow_ctx pages invalid. So gvt need to pin itself. After update
* the guest context, gvt can unpin the shadow_ctx safely.
*/
- ret = engine->context_pin(engine, shadow_ctx);
- if (ret) {
+ ring = engine->context_pin(engine, shadow_ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
gvt_vgpu_err("fail to pin shadow context\n");
workload->status = ret;
mutex_unlock(&dev_priv->drm.struct_mutex);
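engine->context_pin() now returns the pinned struct intel_ring * (or an errno encoded in the pointer) rather than an int, so the caller moves from checking a return code to the kernel's IS_ERR()/PTR_ERR() convention. A minimal userspace sketch of that pointer-encoded error pattern, with simplified versions of the macros (not the kernel's actual definitions) and a hypothetical context_pin():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified takes on the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct ring { int id; };

/* Hypothetical pin function: returns a ring on success, ERR_PTR() on failure. */
static struct ring *context_pin(int ok)
{
	if (!ok)
		return ERR_PTR(-ENOMEM);
	return calloc(1, sizeof(struct ring));
}

int main(void)
{
	struct ring *ring = context_pin(0);

	if (IS_ERR(ring)) {
		int ret = PTR_ERR(ring);
		fprintf(stderr, "fail to pin context: %d\n", ret);
		return 1;
	}
	free(ring);
	return 0;
}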
@@ -330,8 +343,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
workload->ctx_desc.lrca);
- context_page_num = intel_lr_context_size(
- gvt->dev_priv->engine[ring_id]);
+ context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
@@ -406,6 +418,18 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
+ /* If this request caused a GPU hang, req->fence.error will
+ * be set to -EIO. Propagate -EIO into the workload status so
+ * that a request which hung the GPU does not end up triggering
+ * a context switch interrupt to the guest.
+ */
+ if (likely(workload->status == -EINPROGRESS)) {
+ if (workload->req->fence.error == -EIO)
+ workload->status = -EIO;
+ else
+ workload->status = 0;
+ }
+
i915_gem_request_put(fetch_and_zero(&workload->req));
if (!workload->status && !vgpu->resetting) {
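The -EINPROGRESS handling moves out of the SCHEDULE_OUT notifier and into complete_current_workload(), where the final status is decided only after the shadow context has gone idle: a workload that dispatched cleanly ends up with status 0, unless the underlying request was marked -EIO by a GPU hang, in which case the error is propagated so the completion path skips the guest context update and interrupt injection. A compact sketch of that decision, with a hypothetical workload struct standing in for struct intel_vgpu_workload:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the workload and its request. */
struct request { int fence_error; };
struct workload { int status; struct request *req; };

static void finalize_status(struct workload *w)
{
	/* Still -EINPROGRESS means dispatch itself saw no error; the
	 * only remaining failure source is a GPU hang on the request.
	 */
	if (w->status == -EINPROGRESS)
		w->status = (w->req->fence_error == -EIO) ? -EIO : 0;
}

int main(void)
{
	struct request hung = { .fence_error = -EIO };
	struct workload w = { .status = -EINPROGRESS, .req = &hung };

	finalize_status(&w);
	printf("final workload status: %d\n", w.status);  /* prints -EIO */
	return 0;
}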
@@ -431,6 +455,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
atomic_dec(&vgpu->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
+
+ if (gvt->scheduler.need_reschedule)
+ intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+
mutex_unlock(&gvt->lock);
}
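Workload completion now also checks scheduler.need_reschedule and, if set, kicks the GVT service thread with an INTEL_GVT_REQUEST_EVENT_SCHED request rather than rescheduling inline while gvt->lock is held. A rough sketch of that deferred-kick pattern (an atomic request bitmap consumed by a service loop), using hypothetical names rather than the internals of intel_gvt_request_service():

#include <stdatomic.h>
#include <stdio.h>

#define REQUEST_EVENT_SCHED (1u << 0)   /* hypothetical request bit */

static atomic_uint service_request;

/* Completion path: record the request; the service thread does the work. */
static void request_service(unsigned int bit)
{
	atomic_fetch_or(&service_request, bit);
	/* A real implementation would also wake the service thread here. */
}

/* Service thread body: consume pending requests outside the hot path. */
static void service_thread_once(void)
{
	unsigned int pending = atomic_exchange(&service_request, 0);

	if (pending & REQUEST_EVENT_SCHED)
		printf("rescheduling vGPU workloads\n");
}

int main(void)
{
	int need_reschedule = 1;            /* set elsewhere by the scheduler */

	if (need_reschedule)
		request_service(REQUEST_EVENT_SCHED);
	service_thread_once();
	return 0;
}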
@@ -439,8 +467,6 @@ struct workload_thread_param {
int ring_id;
};
-static DEFINE_MUTEX(scheduler_mutex);
-
static int workload_thread(void *priv)
{
struct workload_thread_param *p = (struct workload_thread_param *)priv;
@@ -472,8 +498,6 @@ static int workload_thread(void *priv)
if (!workload)
break;
- mutex_lock(&scheduler_mutex);
-
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
workload->ring_id, workload,
workload->vgpu->id);
@@ -512,9 +536,6 @@ complete:
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
-
- mutex_unlock(&scheduler_mutex);
-
}
return 0;
}