author:    Colin Xu <colin.xu@intel.com>	2018-05-19 12:28:55 +0800
committer: Zhenyu Wang <zhenyuw@linux.intel.com>	2018-05-18 12:39:26 +0800
commit:    9a512e23f173a3598709b74d6ccf9a6616403967
tree:      504f3e1a977026690fb550d5d7e5ad029f1636e9
parent:    f25a49ab8ab9c1b5587837c8a386b276403f315c
drm/i915/gvt: Use sched_lock to protect gvt scheduler logic.
The scheduler lock (gvt->sched_lock) is used to protect the gvt
scheduler logic, including the gvt scheduler structure (gvt->scheduler)
and per-vGPU schedule data (vgpu->sched_data, vgpu->sched_ctl).
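
The locking rule this introduces can be sketched as follows. This is
illustrative only, not part of the patch: the helper name below is
hypothetical, while the lock and field names follow the gvt code
(vgpu_sched_ctl carries a weight field).

	/* Accessing gvt->scheduler, vgpu->sched_data or vgpu->sched_ctl
	 * now requires gvt->sched_lock rather than the global gvt->lock.
	 */
	static void example_update_sched_ctl(struct intel_vgpu *vgpu)
	{
		struct intel_gvt *gvt = vgpu->gvt;

		mutex_lock(&gvt->sched_lock);
		vgpu->sched_ctl.weight = 4;	/* per-vGPU scheduler data */
		mutex_unlock(&gvt->sched_lock);
	}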
v9:
- Change commit author since the patches are improved a lot compared
  with the original version.
  Original author: Pei Zhang <pei.zhang@intel.com>
- Rebase to latest gvt-staging.
v8:
- Correct coding style.
- Rebase to latest gvt-staging.
v7:
- Remove gtt_lock since it is already protected by gvt_lock and vgpu_lock.
v6:
- Rebase to latest gvt-staging.
v5:
- Rebase to latest gvt-staging.
v4:
- Rebase to latest gvt-staging.
v3:
- Update to latest code base.
Signed-off-by: Pei Zhang <pei.zhang@intel.com>
Signed-off-by: Colin Xu <colin.xu@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
 drivers/gpu/drm/i915/gvt/gvt.c          |  1
 drivers/gpu/drm/i915/gvt/gvt.h          |  8
 drivers/gpu/drm/i915/gvt/sched_policy.c | 36
 drivers/gpu/drm/i915/gvt/scheduler.c    |  8
 4 files changed, 45 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 769e06c..22a3ddf 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -376,6 +376,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	idr_init(&gvt->vgpu_idr);
 	spin_lock_init(&gvt->scheduler.mmio_context_lock);
 	mutex_init(&gvt->lock);
+	mutex_init(&gvt->sched_lock);
 	gvt->dev_priv = dev_priv;
 
 	init_device_info(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3600553..362e3d5 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -177,6 +177,11 @@ struct intel_vgpu {
 	bool pv_notified;
 	bool failsafe;
 	unsigned int resetting_eng;
+
+	/* Both sched_data and sched_ctl can be seen a part of the global gvt
+	 * scheduler structure. So below 2 vgpu data are protected
+	 * by sched_lock, not vgpu_lock.
+	 */
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;
 
@@ -299,6 +304,9 @@ struct intel_gvt {
 	 * not yet protected by special locks(vgpu and scheduler lock).
 	 */
 	struct mutex lock;
+	/* scheduler scope lock, protect gvt and vgpu schedule related data */
+	struct mutex sched_lock;
+
 	struct drm_i915_private *dev_priv;
 	struct idr vgpu_idr;	/* vGPU IDR pool */
 
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index d053cbe..09d7bb7 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
 	ktime_t cur_time;
 
-	mutex_lock(&gvt->lock);
+	mutex_lock(&gvt->sched_lock);
 	cur_time = ktime_get();
 
 	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
 	tbs_sched_func(sched_data);
 
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&gvt->sched_lock);
 }
 
 static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
 
 int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
 {
+	int ret;
+
+	mutex_lock(&gvt->sched_lock);
 	gvt->scheduler.sched_ops = &tbs_schedule_ops;
+	ret = gvt->scheduler.sched_ops->init(gvt);
+	mutex_unlock(&gvt->sched_lock);
 
-	return gvt->scheduler.sched_ops->init(gvt);
+	return ret;
 }
 
 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
 {
+	mutex_lock(&gvt->sched_lock);
 	gvt->scheduler.sched_ops->clean(gvt);
+	mutex_unlock(&gvt->sched_lock);
 }
 
+/* for per-vgpu scheduler policy, there are 2 per-vgpu data:
+ * sched_data, and sched_ctl. We see these 2 data as part of
+ * the global scheduler which are proteced by gvt->sched_lock.
+ * Caller should make their decision if the vgpu_lock should
+ * be hold outside.
+ */
+
 int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
 {
-	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+	int ret;
+
+	mutex_lock(&vgpu->gvt->sched_lock);
+	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+	mutex_unlock(&vgpu->gvt->sched_lock);
+
+	return ret;
 }
 
 void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 {
+	mutex_lock(&vgpu->gvt->sched_lock);
 	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+	mutex_unlock(&vgpu->gvt->sched_lock);
 }
 
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
+	mutex_lock(&vgpu->gvt->sched_lock);
 	if (!vgpu_data->active) {
 		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
 		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
 	}
+	mutex_unlock(&vgpu->gvt->sched_lock);
 }
 
 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
 {
+	mutex_lock(&gvt->sched_lock);
 	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+	mutex_unlock(&gvt->sched_lock);
 }
 
 void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
+	mutex_lock(&vgpu->gvt->sched_lock);
 	scheduler->sched_ops->stop_schedule(vgpu);
 
 	if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		}
 	}
 	spin_unlock_bh(&scheduler->mmio_context_lock);
+	mutex_unlock(&vgpu->gvt->sched_lock);
 }
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c0848dc..847f295 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -715,7 +715,7 @@ static struct intel_vgpu_workload *pick_next_workload(
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload = NULL;
 
-	mutex_lock(&gvt->lock);
+	mutex_lock(&gvt->sched_lock);
 
 	/*
 	 * no current vgpu / will be scheduled out / no workload
@@ -761,7 +761,7 @@ static struct intel_vgpu_workload *pick_next_workload(
 	atomic_inc(&workload->vgpu->submission.running_workload_num);
 out:
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&gvt->sched_lock);
 	return workload;
 }
 
@@ -862,8 +862,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	int event;
 
-	mutex_lock(&gvt->lock);
 	mutex_lock(&vgpu->vgpu_lock);
+	mutex_lock(&gvt->sched_lock);
 
 	/* For the workload w/ request, needs to wait for the context
 	 * switch to make sure request is completed.
@@ -941,8 +941,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	if (gvt->scheduler.need_reschedule)
 		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
 
+	mutex_unlock(&gvt->sched_lock);
 	mutex_unlock(&vgpu->vgpu_lock);
-	mutex_unlock(&gvt->lock);
 }
 
 struct workload_thread_param {
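
Note the ordering in the complete_current_workload() hunks: vgpu->vgpu_lock
is now taken before gvt->sched_lock and released in reverse order. Any code
that needs both locks should follow the same nesting to avoid an ABBA
deadlock. A minimal sketch of that ordering rule (the helper below is
hypothetical; the lock names come from the patch):

	/* Lock nesting after this patch: vgpu_lock -> sched_lock.
	 * Never acquire vgpu_lock while already holding sched_lock.
	 */
	static void example_touch_vgpu_and_scheduler(struct intel_vgpu *vgpu)
	{
		struct intel_gvt *gvt = vgpu->gvt;

		mutex_lock(&vgpu->vgpu_lock);
		mutex_lock(&gvt->sched_lock);

		/* ... update per-vGPU state and scheduler state here ... */

		mutex_unlock(&gvt->sched_lock);
		mutex_unlock(&vgpu->vgpu_lock);
	}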