author | Changbin Du <changbin.du@intel.com> | 2017-05-04 10:52:38 +0800
committer | Zhenyu Wang <zhenyuw@linux.intel.com> | 2017-06-08 13:59:15 +0800
commit | 0e86cc9ccc3bf557348befaddf5cb613cf3c4458 (patch)
tree | d4ca241359ce82bafdcb9b65938da9961df6b2c9 /drivers/gpu/drm/i915/gvt/scheduler.c
parent | 5d0f5de16ef3d127469aa09dcdf07bec5174937f (diff)
download | lwn-0e86cc9ccc3bf557348befaddf5cb613cf3c4458.tar.gz lwn-0e86cc9ccc3bf557348befaddf5cb613cf3c4458.zip
drm/i915/gvt: implement per-vm mmio switching optimization
Commit ab9da627906a ("drm/i915: make context status notifier head be
per engine") gives us a chance to inspect every single request. We can
therefore eliminate unnecessary mmio switching for the same vGPU; mmio
switching is only needed between different VMs (including the host).
This patch introduces a new general API, intel_gvt_switch_mmio(), to
replace the old intel_gvt_load/restore_render_mmio(). The new function
can be further optimized for vGPU-to-vGPU switching.
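For reference, a minimal sketch of the new entry point as it is
exercised by the diff below; the prototype here is inferred from the
call sites, and NULL on either side denotes the host context:

```c
/* Sketch, inferred from usage in this patch: save the mmio state of
 * 'pre' and load the state of 'next' on the given ring.  Either
 * pointer may be NULL, meaning the host owns that side of the switch,
 * replacing the old intel_gvt_load/restore_render_mmio() pair. */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next,
			   int ring_id);
```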
To support switching individual rings, we track the owner occupying
each ring. When another VM or the host requests a ring, we perform the
mmio context switch; otherwise there is no need to switch the ring.
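The core idea can be sketched independently of the i915 types: keep a
per-ring owner pointer and pay the mmio switch cost only when the owner
actually changes. This is a simplified model; the names and types are
illustrative, not the driver's:

```c
#include <stdbool.h>
#include <stddef.h>

struct vgpu;			/* stands in for struct intel_vgpu */

#define NUM_RINGS 5

/* Current owner of each ring; NULL means the host owns it. */
static struct vgpu *engine_owner[NUM_RINGS];

/* Returns true when an mmio context switch was actually required. */
static bool claim_ring(struct vgpu *next, int ring_id)
{
	struct vgpu *prev = engine_owner[ring_id];

	if (prev == next)
		return false;	/* same owner: skip the mmio switch */

	/* the real code calls intel_gvt_switch_mmio(prev, next, ring_id) */
	engine_owner[ring_id] = next;
	return true;
}
```

In the driver this check runs under scheduler->mmio_context_lock, as
the diff below shows.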
This optimization is very useful when a single guest has plenty of
workloads and the host is mostly idle. In the best case, no mmio
switching happens at all.
v2:
o fix missing ring switch issue. (chuanxiao)
o support individual ring switch.
Signed-off-by: Changbin Du <changbin.du@intel.com>
Reviewed-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r-- | drivers/gpu/drm/i915/gvt/scheduler.c | 35
1 file changed, 28 insertions(+), 7 deletions(-)
```diff
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6ae286cb5804..aa7e06df88b6 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -138,21 +138,42 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
 				shadow_ctx_notifier_block[req->engine->id]);
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	struct intel_vgpu_workload *workload =
-		scheduler->current_workload[req->engine->id];
+	enum intel_engine_id ring_id = req->engine->id;
+	struct intel_vgpu_workload *workload;
+
+	if (!is_gvt_request(req)) {
+		spin_lock_bh(&scheduler->mmio_context_lock);
+		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
+		    scheduler->engine_owner[ring_id]) {
+			/* Switch ring from vGPU to host. */
+			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
+					      NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+		spin_unlock_bh(&scheduler->mmio_context_lock);

-	if (!is_gvt_request(req) || unlikely(!workload))
+		return NOTIFY_OK;
+	}
+
+	workload = scheduler->current_workload[ring_id];
+	if (unlikely(!workload))
 		return NOTIFY_OK;

 	switch (action) {
 	case INTEL_CONTEXT_SCHEDULE_IN:
-		intel_gvt_load_render_mmio(workload->vgpu,
-					   workload->ring_id);
+		spin_lock_bh(&scheduler->mmio_context_lock);
+		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
+			/* Switch ring from host to vGPU or vGPU to vGPU. */
+			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
+					      workload->vgpu, ring_id);
+			scheduler->engine_owner[ring_id] = workload->vgpu;
+		} else
+			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
+				      ring_id, workload->vgpu->id);
+		spin_unlock_bh(&scheduler->mmio_context_lock);
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
-		intel_gvt_restore_render_mmio(workload->vgpu,
-					      workload->ring_id);
 		/* If the status is -EINPROGRESS means this workload
 		 * doesn't meet any issue during dispatching so when
 		 * get the SCHEDULE_OUT set the status to be zero for
```