path: root/drivers/gpu/drm/i915/gvt/vgpu.c
author    Zhi Wang <zhi.a.wang@intel.com>  2017-03-29 20:48:39 +0300
committer Zhenyu Wang <zhenyuw@linux.intel.com>  2017-03-30 08:37:53 +0300
commit    b79c52aef3cdee903017c1e9834b53996d70010e (patch)
tree      d17a828c431dc76a2b79dedfdaecfb41ccbf7aff /drivers/gpu/drm/i915/gvt/vgpu.c
parent    bc2d4b62db67f817b09c782219996630e9c2f5e2 (diff)
drm/i915/gvt: Activate/de-activate vGPU in mdev ops.
This patch introduces two functions for activating/de-activating a vGPU in the mdev ops.

A race condition was found between virtual vblank emulation and the KVMGT mdev release path. Vblank emulation emulates and injects a vblank interrupt for every active vGPU while holding gvt->lock, whereas the mdev release path releases the hypervisor handle directly, without changing the vGPU status or taking gvt->lock. As a result, a kernel oops is encountered when vblank emulation injects an interrupt through an invalid hypervisor handle. (Reported by Terrence)

To solve this problem, we factor vGPU activation/de-activation out of the vGPU creation/destruction path and let the KVMGT mdev release ops de-activate the vGPU before releasing the hypervisor handle. Once a vGPU is de-activated, GVT-g will neither emulate vblank for it nor touch the hypervisor handle.

Fixes: 659643f ("drm/i915/gvt/kvmgt: add vfio/mdev support to KVMGT")
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
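The race is easiest to see from the vblank emulation side. Below is a minimal C sketch, not the actual gvt code: the struct fields are trimmed stand-ins for the real definitions in gvt.h, and the loop body is elided. It illustrates why clearing vgpu->active under gvt->lock (as the patch does in intel_gvt_deactivate_vgpu) closes the window: the emulation loop checks the flag under the same lock, so it can no longer reach a vGPU whose hypervisor handle has been released.

/*
 * Minimal sketch under assumed struct shapes; see gvt.h for the
 * real intel_gvt/intel_vgpu definitions.
 */
#include <linux/idr.h>
#include <linux/mutex.h>

struct intel_gvt;

struct intel_vgpu {
	int id;
	bool active;			/* set/cleared only under gvt->lock */
	struct intel_gvt *gvt;
};

struct intel_gvt {
	struct mutex lock;
	struct idr vgpu_idr;
};

/*
 * Called from the vblank timer: skips de-activated vGPUs, so it can
 * never inject an interrupt through a released hypervisor handle.
 */
static void emulate_vblank_for_all_vgpus(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int id;

	mutex_lock(&gvt->lock);
	idr_for_each_entry(&gvt->vgpu_idr, vgpu, id) {
		if (!vgpu->active)
			continue;	/* handle may already be gone */
		/* ... emulate and inject the vblank interrupt ... */
	}
	mutex_unlock(&gvt->lock);
}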
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/vgpu.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 43
1 file changed, 38 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 41cfa5ccae84..2f5792a1ce38 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -179,20 +179,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
}
/**
- * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * intel_gvt_activate_vgpu - activate a virtual GPU
* @vgpu: virtual GPU
*
- * This function is called when user wants to destroy a virtual GPU.
+ * This function is called when user wants to activate a virtual GPU.
*
*/
-void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
+{
+ mutex_lock(&vgpu->gvt->lock);
+ vgpu->active = true;
+ mutex_unlock(&vgpu->gvt->lock);
+}
+
+/**
+ * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to deactivate a virtual GPU.
+ * Once de-activated, GVT-g stops emulating vblank for the vGPU and
+ * no longer touches its hypervisor handle.
+ *
+ */
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
mutex_lock(&gvt->lock);
vgpu->active = false;
- idr_remove(&gvt->vgpu_idr, vgpu->id);
if (atomic_read(&vgpu->running_workload_num)) {
mutex_unlock(&gvt->lock);
@@ -201,6 +215,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
}
intel_vgpu_stop_schedule(vgpu);
+
+ mutex_unlock(&gvt->lock);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ mutex_lock(&gvt->lock);
+
+ WARN(vgpu->active, "vGPU is still active!\n");
+
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_gvt_context(vgpu);
intel_vgpu_clean_execlist(vgpu);
@@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_shadow_ctx;
- vgpu->active = true;
mutex_unlock(&gvt->lock);
return vgpu;
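For reference, here is a hedged sketch of how mdev ops are expected to pair the new calls after this patch. Only the three intel_gvt_*_vgpu() functions come from the diff above; release_hypervisor_handle() and the sketch_mdev_*() names are hypothetical placeholders, not the real kvmgt.c hooks.

/* Hypothetical KVMGT-side pairing (sketch only; the real mdev ops
 * live in drivers/gpu/drm/i915/gvt/kvmgt.c). */

void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);

static void release_hypervisor_handle(struct intel_vgpu *vgpu)
{
	/* hypothetical stand-in for dropping the hypervisor handle */
}

static int sketch_mdev_open(struct intel_vgpu *vgpu)
{
	intel_gvt_activate_vgpu(vgpu);	/* vblank emulation may now run */
	return 0;
}

static void sketch_mdev_release(struct intel_vgpu *vgpu)
{
	/* De-activate first: after this returns, GVT-g neither emulates
	 * vblank for the vGPU nor touches its hypervisor handle ... */
	intel_gvt_deactivate_vgpu(vgpu);

	/* ... so the handle can be released without racing the timer. */
	release_hypervisor_handle(vgpu);
}

static void sketch_mdev_remove(struct intel_vgpu *vgpu)
{
	intel_gvt_destroy_vgpu(vgpu);	/* WARNs if still active */
}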