
drm/i915/gvt: refine function emulate_mmio_read/write

These 2 functions were written by multiple people across multiple patches. 'return'
and 'goto err' are mixed in the same places, which makes the functions look
disordered. Unify them to use only 'goto' so that the gvt lock is acquired in one
place and released in one place.

Signed-off-by: Pei Zhang <pei.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
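
The pattern this unification converges on is easier to see outside the diff. Below is a minimal userspace sketch of that control flow, not the kernel code itself: the pthread mutex and the fast_path()/do_emulate()/emulate_mmio_access() names are hypothetical stand-ins for gvt->lock and the real emulation steps.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for gvt->lock; the real code uses the kernel mutex API. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical helpers standing in for the real emulation steps. */
static int fast_path(unsigned int pa)  { return pa < 0x100; }
static int do_emulate(unsigned int pa) { return (pa & 1) ? -22 : 0; } /* -EINVAL */

/*
 * Take the lock once, jump to 'out' from every early-exit path, and let
 * 'err' fall through into 'out', so the unlock happens in exactly one place.
 */
static int emulate_mmio_access(unsigned int pa)
{
	int ret = -22; /* -EINVAL */

	pthread_mutex_lock(&lock);

	if (fast_path(pa)) {
		ret = 0;
		goto out;		/* was: unlock + return */
	}

	ret = do_emulate(pa);
	if (ret)
		goto err;

	ret = 0;
	goto out;

err:
	fprintf(stderr, "fail to emulate MMIO access %08x\n", pa);
out:
	pthread_mutex_unlock(&lock);	/* the single unlock site */
	return ret;
}

int main(void)
{
	printf("fast path:  %d\n", emulate_mmio_access(0x010));
	printf("error path: %d\n", emulate_mmio_access(0x201));
	return 0;
}

Keeping the unlock at a single 'out' label means an early return added later cannot silently leak the lock, which is the point of replacing the scattered unlock-and-return pairs with 'goto out'.
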
Pei Zhang 7 years ago
parent commit eb3f05171c
1 changed file with 15 additions and 21 deletions

drivers/gpu/drm/i915/gvt/mmio.c (+15, -21)

@@ -157,7 +157,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	unsigned int offset = 0;
 	int ret = -EINVAL;
 
-
 	if (vgpu->failsafe) {
 		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
 		return 0;
@@ -166,8 +165,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 
 	if (vgpu_gpa_is_aperture(vgpu, pa)) {
 		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
@@ -183,8 +181,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 					ret, t->gfn, pa, *(u32 *)p_data,
 					bytes);
 			}
-			mutex_unlock(&gvt->lock);
-			return ret;
+			goto out;
 		}
 	}
 
@@ -205,14 +202,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 				p_data, bytes);
 		if (ret)
 			goto err;
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
 		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
@@ -228,11 +223,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		goto err;
 
 	intel_gvt_mmio_set_accessed(gvt, offset);
-	mutex_unlock(&gvt->lock);
-	return 0;
+	ret = 0;
+	goto out;
+
 err:
 	gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
 			offset, bytes);
+out:
 	mutex_unlock(&gvt->lock);
 	return ret;
 }
@@ -263,8 +260,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 
 	if (vgpu_gpa_is_aperture(vgpu, pa)) {
 		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
@@ -280,8 +276,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 					ret, t->gfn, pa,
 					*(u32 *)p_data, bytes);
 			}
-			mutex_unlock(&gvt->lock);
-			return ret;
+			goto out;
 		}
 	}
 
@@ -302,14 +297,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 				p_data, bytes);
 		if (ret)
 			goto err;
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
 		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
@@ -317,11 +310,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		goto err;
 
 	intel_gvt_mmio_set_accessed(gvt, offset);
-	mutex_unlock(&gvt->lock);
-	return 0;
+	ret = 0;
+	goto out;
 err:
 	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
 		     bytes);
+out:
 	mutex_unlock(&gvt->lock);
 	return ret;
 }