@@ -173,8 +173,8 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
 	_clear_vgpu_fence(vgpu);
 	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
 		reg = vgpu->fence.regs[i];
-		list_add_tail(&reg->link,
-			      &dev_priv->mm.fence_list);
+		i915_unreserve_fence(reg);
+		vgpu->fence.regs[i] = NULL;
 	}
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
@@ -187,24 +187,19 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	struct drm_i915_fence_reg *reg;
 	int i;
-	struct list_head *pos, *q;
 
 	intel_runtime_pm_get(dev_priv);
 
 	/* Request fences from host */
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	i = 0;
-	list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
-		reg = list_entry(pos, struct drm_i915_fence_reg, link);
-		if (reg->pin_count || reg->vma)
-			continue;
-		list_del(pos);
+
+	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+		reg = i915_reserve_fence(dev_priv);
+		if (IS_ERR(reg))
+			goto out_free_fence;
+
 		vgpu->fence.regs[i] = reg;
-		if (++i == vgpu_fence_sz(vgpu))
-			break;
 	}
-	if (i != vgpu_fence_sz(vgpu))
-		goto out_free_fence;
 
 	_clear_vgpu_fence(vgpu);
 
@@ -212,13 +207,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 	intel_runtime_pm_put(dev_priv);
 	return 0;
 out_free_fence:
+	gvt_vgpu_err("Failed to alloc fences\n");
 	/* Return fences to host, if fail */
 	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
 		reg = vgpu->fence.regs[i];
 		if (!reg)
 			continue;
-		list_add_tail(&reg->link,
-			      &dev_priv->mm.fence_list);
+		i915_unreserve_fence(reg);
+		vgpu->fence.regs[i] = NULL;
 	}
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	intel_runtime_pm_put(dev_priv);
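
For context: the patch drops the GVT-side scavenging of unpinned entries from dev_priv->mm.fence_list in favour of the host driver's i915_reserve_fence()/i915_unreserve_fence() interface, which hands out a whole fence register or an error pointer. Below is a minimal, self-contained userspace sketch of that reserve-or-roll-back idiom; demo_reserve(), demo_unreserve(), and alloc_fences() are hypothetical stand-ins for illustration only, not the kernel implementations, and the error-pointer helpers are simplified versions of the kernel's.

/*
 * Minimal sketch of the reserve-or-roll-back idiom used above.
 * demo_reserve()/demo_unreserve() are hypothetical stand-ins for
 * i915_reserve_fence()/i915_unreserve_fence().
 * Build with: cc -o fence-demo fence-demo.c
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_FENCES 4

struct demo_fence {
	int id;
	int reserved;
};

static struct demo_fence pool[MAX_FENCES] = {
	{ .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 },
};

/* Simplified kernel-style error-pointer helpers. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

/* Hand out one free register, or an error pointer if none is left. */
static struct demo_fence *demo_reserve(void)
{
	for (int i = 0; i < MAX_FENCES; i++) {
		if (!pool[i].reserved) {
			pool[i].reserved = 1;
			return &pool[i];
		}
	}
	return ERR_PTR(-ENOSPC);
}

/* Return a previously reserved register to the pool. */
static void demo_unreserve(struct demo_fence *reg)
{
	reg->reserved = 0;
}

/*
 * Mirror of alloc_vgpu_fence()'s control flow: reserve n registers,
 * and on any failure return every register reserved so far, skipping
 * NULL slots exactly like the out_free_fence path above.
 */
static int alloc_fences(struct demo_fence **regs, int n)
{
	int i, err = 0;

	memset(regs, 0, n * sizeof(*regs));
	for (i = 0; i < n; i++) {
		struct demo_fence *reg = demo_reserve();

		if (IS_ERR(reg)) {
			err = (int)PTR_ERR(reg);
			goto out_free;
		}
		regs[i] = reg;
	}
	return 0;

out_free:
	for (i = 0; i < n; i++) {
		if (!regs[i])
			continue;
		demo_unreserve(regs[i]);
		regs[i] = NULL;
	}
	return err;
}

int main(void)
{
	struct demo_fence *regs[MAX_FENCES + 1];

	/* One more than the pool holds: exercises the rollback path. */
	if (alloc_fences(regs, MAX_FENCES + 1))
		puts("over-allocation failed, reservations rolled back");

	/* Succeeds only because the rollback freed everything above. */
	if (!alloc_fences(regs, MAX_FENCES))
		puts("full allocation succeeded");
	return 0;
}

The rollback loop deliberately iterates over every slot and skips NULLs rather than stopping at the failure index, mirroring the patch's out_free_fence path: only registers that were actually reserved are returned, so a partial failure leaves the pool exactly as it was found.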