drm/i915/gvt: Add VM healthy check for workload_thread
When a scan error occurs in dispatch_workload, check the guest's health state and free all the queued workloads before failsafe mode is entered.

Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 5c56883a95
commit e011c6ce2b
3 changed files with 16 additions and 1 deletion
drivers/gpu/drm/i915/gvt/gvt.h
@@ -193,6 +193,10 @@ struct intel_vgpu {
 #endif
 };
 
+/* validating GM healthy status*/
+#define vgpu_is_vm_unhealthy(ret_val) \
+        (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
+
 struct intel_gvt_gm {
         unsigned long vgpu_allocated_low_gm_size;
         unsigned long vgpu_allocated_high_gm_size;
@@ -497,6 +501,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
 
 struct intel_gvt_ops {
         int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
@@ -519,6 +524,7 @@ struct intel_gvt_ops {
 enum {
         GVT_FAILSAFE_UNSUPPORTED_GUEST,
         GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
+        GVT_FAILSAFE_GUEST_ERR,
 };
 
 static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
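The macro added above is what callers use to decide whether an error came from a misbehaving guest. For reference, a minimal stand-alone sketch (ordinary user-space C, not driver code) of how vgpu_is_vm_unhealthy() classifies return values; EBADRQC and EFAULT come from the Linux <errno.h>, and the sample values fed to it are made up for illustration only.

/* Illustration only: replicates the macro added to gvt.h above. */
#include <errno.h>
#include <stdio.h>

#define vgpu_is_vm_unhealthy(ret_val) \
        (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))

int main(void)
{
        int samples[] = { 0, -EBADRQC, -EFAULT, -ENOMEM };  /* illustrative values */
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("ret=%d -> %s\n", samples[i],
                       vgpu_is_vm_unhealthy(samples[i]) ?
                       "guest unhealthy, enter failsafe" : "handled normally");
        return 0;
}

Only -EBADRQC and -EFAULT are treated as guest-health failures; any other error is left to the existing error handling.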
drivers/gpu/drm/i915/gvt/handlers.c
@@ -157,7 +157,7 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
         (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
 
 
-static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
 {
         switch (reason) {
         case GVT_FAILSAFE_UNSUPPORTED_GUEST:
@@ -165,6 +165,8 @@ static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
                 break;
         case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
                 pr_err("Graphics resource is not enough for the guest\n");
+        case GVT_FAILSAFE_GUEST_ERR:
+                pr_err("GVT Internal error for the guest\n");
         default:
                 break;
         }
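One behavior worth noting in the hunk above: as written, the GVT_FAILSAFE_INSUFFICIENT_RESOURCE case has no break before the new GVT_FAILSAFE_GUEST_ERR case, so an insufficient-resource report falls through and also prints the internal-error message before reaching default. A stand-alone sketch of that control flow; printf() and the local report() wrapper stand in for pr_err() and the driver function and are not part of the patch.

#include <stdio.h>

enum {
        GVT_FAILSAFE_UNSUPPORTED_GUEST,
        GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
        GVT_FAILSAFE_GUEST_ERR,
};

/* Mirrors only the switch shape shown in the hunk above. */
static void report(int reason)
{
        switch (reason) {
        case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
                printf("Graphics resource is not enough for the guest\n");
                /* no break: falls through */
        case GVT_FAILSAFE_GUEST_ERR:
                printf("GVT Internal error for the guest\n");
                /* no break: falls through to default */
        default:
                break;
        }
}

int main(void)
{
        report(GVT_FAILSAFE_INSUFFICIENT_RESOURCE);     /* prints both messages */
        report(GVT_FAILSAFE_GUEST_ERR);                 /* prints one message */
        return 0;
}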
drivers/gpu/drm/i915/gvt/scheduler.c
@@ -634,6 +634,13 @@ complete:
                                 FORCEWAKE_ALL);
 
                 intel_runtime_pm_put(gvt->dev_priv);
+
+                if (ret && (vgpu_is_vm_unhealthy(ret))) {
+                        mutex_lock(&gvt->lock);
+                        intel_vgpu_clean_execlist(vgpu);
+                        mutex_unlock(&gvt->lock);
+                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+                }
         }
         return 0;
 }
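Putting the pieces together, the new error path at the tail of workload_thread() reacts only to guest-health failures: it drops the vGPU's queued execlist workloads under gvt->lock and then switches the vGPU into failsafe mode. A stand-alone sketch of that sequence follows; the clean_execlist()/enter_failsafe() stubs and the plain pthread mutex are placeholders for intel_vgpu_clean_execlist(), enter_failsafe_mode() and gvt->lock, not the actual driver code.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define vgpu_is_vm_unhealthy(ret_val) \
        (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))

static pthread_mutex_t gvt_lock = PTHREAD_MUTEX_INITIALIZER;

static void clean_execlist(void) { printf("free all queued workloads\n"); }
static void enter_failsafe(void) { printf("enter failsafe mode\n"); }

/* Mirrors the tail of the workload loop: act only on guest-health errors. */
static void handle_workload_result(int ret)
{
        if (ret && vgpu_is_vm_unhealthy(ret)) {
                pthread_mutex_lock(&gvt_lock);
                clean_execlist();
                pthread_mutex_unlock(&gvt_lock);
                enter_failsafe();
        }
}

int main(void)
{
        handle_workload_result(-EBADRQC);  /* scan error: clean up and enter failsafe */
        handle_workload_result(-ENOMEM);   /* other errors: left to normal handling */
        return 0;
}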