
Merge tag 'gvt-next-fixes-2018-08-14' of https://github.com/intel/gvt-linux into drm-intel-next-fixes

gvt-next-fixes-2018-08-14

- Fix an error code in gvt_dma_map_page() (Dan)
- Fix an off-by-one error in intel_vgpu_write_fence() (Dan)
- Fix a potential Spectre v1 gadget in intel_vgpu_ioctl() (Gustavo)
- Fix workload freeing on vGPU release (Henry)
- Fix the cleanup sequence in intel_gvt_clean_device() (Henry)
- Fix the dmabuf mutex init location (Henry)
- Fix a possible memory leak in the intel_vgpu_ioctl() error path (Yi)
- Return an error on cmd access check failure (Yan)

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180814073140.GJ22630@zhen-hp.sh.intel.com
commit 4795ac626a by Rodrigo Vivi, 7 years ago

+ 1 - 1
drivers/gpu/drm/i915/gvt/aperture_gm.c

@@ -131,7 +131,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 
 	assert_rpm_wakelock_held(dev_priv);
 
-	if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+	if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
 		return;
 
 	reg = vgpu->fence.regs[fence];
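
The guard is the classic zero-based bounds check: vgpu_fence_sz() returns a count, so the valid indices run from 0 through size - 1 and index == size must be rejected as well. A minimal sketch of the same shape (hypothetical names, not GVT code):

	#define NUM_FENCE_REGS 4
	static unsigned int regs[NUM_FENCE_REGS];

	static int read_fence(unsigned int fence, unsigned int *val)
	{
		if (fence >= NUM_FENCE_REGS)	/* '>' let fence == 4 through */
			return -1;		/* regs[4] is one past the end */
		*val = regs[fence];
		return 0;
	}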

+ 1 - 1
drivers/gpu/drm/i915/gvt/cmd_parser.c

@@ -874,7 +874,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
 		gvt_vgpu_err("%s access to non-render register (%x)\n",
 				cmd, offset);
-		return 0;
+		return -EBADRQC;
 	}
 
 	if (is_shadowed_mmio(offset)) {
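
Returning 0 told the caller the register check passed, so emulation of the command continued after an illegal access; -EBADRQC ("invalid request code") makes the failure propagate instead. A hedged sketch of the caller side (the real call sites in cmd_parser.c may aggregate results differently):

	ret = cmd_reg_handler(s, offset, i, "lri");
	if (ret)
		return ret;	/* abort the scan and reject the workload */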

+ 7 - 8
drivers/gpu/drm/i915/gvt/gvt.c

@@ -176,6 +176,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
 	.vgpu_create = intel_gvt_create_vgpu,
 	.vgpu_destroy = intel_gvt_destroy_vgpu,
+	.vgpu_release = intel_gvt_release_vgpu,
 	.vgpu_reset = intel_gvt_reset_vgpu,
 	.vgpu_activate = intel_gvt_activate_vgpu,
 	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
@@ -315,6 +316,11 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 	if (WARN_ON(!gvt))
 		return;
 
+	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+	intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+	intel_gvt_cleanup_vgpu_type_groups(gvt);
+	intel_gvt_clean_vgpu_types(gvt);
+
 	intel_gvt_debugfs_clean(gvt);
 	clean_service_thread(gvt);
 	intel_gvt_clean_cmd_parser(gvt);
@@ -322,17 +328,10 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 	intel_gvt_clean_workload_scheduler(gvt);
 	intel_gvt_clean_gtt(gvt);
 	intel_gvt_clean_irq(gvt);
-	intel_gvt_clean_mmio_info(gvt);
 	intel_gvt_free_firmware(gvt);
-
-	intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
-	intel_gvt_cleanup_vgpu_type_groups(gvt);
-	intel_gvt_clean_vgpu_types(gvt);
-
+	intel_gvt_clean_mmio_info(gvt);
 	idr_destroy(&gvt->vgpu_idr);
 
-	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-
 	kfree(dev_priv->gvt);
 	dev_priv->gvt = NULL;
 }
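
The reordering restores the rule that teardown mirrors init in reverse: the vGPUs (consumers) are destroyed and the hypervisor detached before the scheduler, GTT and MMIO facilities they depend on are torn down. A generic sketch of that discipline, with hypothetical names:

	static int setup_services(void);
	static void cleanup_services(void);
	static int create_consumers(void);
	static void destroy_consumers(void);

	static int subsys_init(void)
	{
		int ret;

		ret = setup_services();		/* providers first */
		if (ret)
			return ret;

		ret = create_consumers();	/* then their users */
		if (ret)
			cleanup_services();
		return ret;
	}

	static void subsys_fini(void)
	{
		destroy_consumers();		/* reverse: users first */
		cleanup_services();		/* providers last */
	}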

+ 3 - 1
drivers/gpu/drm/i915/gvt/gvt.h

@@ -486,6 +486,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 					 struct intel_vgpu_type *type);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 				 unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
@@ -563,7 +564,8 @@ struct intel_gvt_ops {
 				unsigned int);
 	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
 				struct intel_vgpu_type *);
-	void (*vgpu_destroy)(struct intel_vgpu *);
+	void (*vgpu_destroy)(struct intel_vgpu *vgpu);
+	void (*vgpu_release)(struct intel_vgpu *vgpu);
 	void (*vgpu_reset)(struct intel_vgpu *);
 	void (*vgpu_activate)(struct intel_vgpu *);
 	void (*vgpu_deactivate)(struct intel_vgpu *);
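
Routing the new operation through intel_gvt_ops keeps kvmgt.c decoupled from the core: the mdev glue only ever sees the function-pointer table. The pattern in miniature (hypothetical names):

	struct obj;

	struct obj_ops {
		void (*destroy)(struct obj *obj);
		void (*release)(struct obj *obj);	/* the new hook */
	};

	static void obj_destroy(struct obj *obj) { /* free everything */ }
	static void obj_release(struct obj *obj) { /* stop, keep the shell */ }

	static const struct obj_ops obj_ops = {
		.destroy = obj_destroy,
		.release = obj_release,
	};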

+ 18 - 8
drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -43,6 +43,8 @@
 #include <linux/mdev.h>
 #include <linux/debugfs.h>
 
+#include <linux/nospec.h>
+
 #include "i915_drv.h"
 #include "gvt.h"
 
@@ -187,14 +189,14 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
 
 	/* Setup DMA mapping. */
 	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
-	ret = dma_mapping_error(dev, *dma_addr);
-	if (ret) {
+	if (dma_mapping_error(dev, *dma_addr)) {
 		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
 			     page_to_pfn(page), ret);
 		gvt_unpin_guest_page(vgpu, gfn, size);
+		return -ENOMEM;
 	}
 
-	return ret;
+	return 0;
 }
 
 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
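
At the time of this patch dma_mapping_error() returned a plain truth value rather than a negative errno, so forwarding it handed callers a positive 1 on failure. The fixed shape as a standalone sketch (kernel DMA API; the unpin helper stands in for the GVT one):

	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		/* non-zero only says the mapping failed; pick an
		 * explicit errno instead of forwarding the flag */
		unpin_page(vgpu, gfn, size);
		return -ENOMEM;
	}
	return 0;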
@@ -666,7 +668,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
 		return;
 
-	intel_gvt_ops->vgpu_deactivate(vgpu);
+	intel_gvt_ops->vgpu_release(vgpu);
 
 	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
@@ -1139,7 +1141,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
 		struct vfio_region_info info;
 		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
-		int i, ret;
+		unsigned int i;
+		int ret;
 		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
 		size_t size;
 		int nr_areas = 1;
@@ -1224,6 +1227,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 				if (info.index >= VFIO_PCI_NUM_REGIONS +
 						vgpu->vdev.num_regions)
 					return -EINVAL;
+				info.index =
+					array_index_nospec(info.index,
+							VFIO_PCI_NUM_REGIONS +
+							vgpu->vdev.num_regions);
 
 				i = info.index - VFIO_PCI_NUM_REGIONS;
 
@@ -1250,11 +1257,13 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 					&sparse->header, sizeof(*sparse) +
 					(sparse->nr_areas *
 						sizeof(*sparse->areas)));
-				kfree(sparse);
-				if (ret)
+				if (ret) {
+					kfree(sparse);
 					return ret;
+				}
 				break;
 			default:
+				kfree(sparse);
 				return -EINVAL;
 			}
 		}
@@ -1270,6 +1279,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 						  sizeof(info), caps.buf,
 						  caps.size)) {
 					kfree(caps.buf);
+					kfree(sparse);
 					return -EFAULT;
 				}
 				info.cap_offset = sizeof(info);
@@ -1278,6 +1288,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 			kfree(caps.buf);
 		}
 
+		kfree(sparse);
 		return copy_to_user((void __user *)arg, &info, minsz) ?
 			-EFAULT : 0;
 	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
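
sparse is allocated (kzalloc) earlier in this branch, and before the fix some of the early returns after that allocation could leave it unfreed. The change pairs every exit with a kfree(); since kfree(NULL) is a no-op, the unconditional frees are safe even when no sparse capability was built. An alternative shape that makes such leaks harder to reintroduce is a single exit label, sketched with hypothetical helpers:

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = build_capability(buf);
	if (ret)
		goto out;
	ret = copy_to_caller(buf);
out:
	kfree(buf);		/* one free covers every path */
	return ret;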
@@ -1615,7 +1626,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 	kvmgt_protect_table_init(info);
 	gvt_cache_init(vgpu);
 
-	mutex_init(&vgpu->dmabuf_lock);
 	init_completion(&vgpu->vblank_done);
 
 	info->track_node.track_write = kvmgt_page_track_write;

+ 4 - 3
drivers/gpu/drm/i915/gvt/scheduler.c

@@ -784,7 +784,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	kunmap(page);
 }
 
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+				unsigned long engine_mask)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -879,7 +880,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		 * cleaned up during the resetting process later, so doing
 		 * the workload clean up here doesn't have any impact.
 		 **/
-		clean_workloads(vgpu, ENGINE_MASK(ring_id));
+		intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
 	}
 
 	workload->complete(workload);
@@ -1081,7 +1082,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 	if (!s->active)
 		return;
 
-	clean_workloads(vgpu, engine_mask);
+	intel_vgpu_clean_workloads(vgpu, engine_mask);
 	s->ops->reset(vgpu, engine_mask);
 }
 

+ 3 - 0
drivers/gpu/drm/i915/gvt/scheduler.h

@@ -158,4 +158,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+				unsigned long engine_mask);
+
 #endif
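
Renaming clean_workloads() to intel_vgpu_clean_workloads() and declaring it here is the usual two-step for sharing a file-local helper: drop static and adopt the subsystem prefix in the .c file, then expose the prototype so other translation units (vgpu.c, below) can call it. In miniature, with hypothetical names:

	/* util.c: formerly 'static void flush_pending(...)' */
	void mylib_flush_pending(struct mylib_ctx *ctx)
	{
		/* body unchanged; only linkage and name changed */
	}

	/* util.h */
	void mylib_flush_pending(struct mylib_ctx *ctx);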

+ 21 - 2
drivers/gpu/drm/i915/gvt/vgpu.c

@@ -222,7 +222,7 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
  * @vgpu: virtual GPU
  *
  * This function is called when user wants to deactivate a virtual GPU.
- * All virtual GPU runtime information will be destroyed.
+ * The virtual GPU will be stopped.
  *
  */
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
@@ -238,11 +238,29 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 	}
 
 	intel_vgpu_stop_schedule(vgpu);
-	intel_vgpu_dmabuf_cleanup(vgpu);
 
 	mutex_unlock(&vgpu->vgpu_lock);
 }
 
+/**
+ * intel_gvt_release_vgpu - release a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to release a virtual GPU.
+ * The virtual GPU will be stopped and all runtime information will be
+ * destroyed.
+ *
+ */
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
+{
+	intel_gvt_deactivate_vgpu(vgpu);
+
+	mutex_lock(&vgpu->vgpu_lock);
+	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
+	intel_vgpu_dmabuf_cleanup(vgpu);
+	mutex_unlock(&vgpu->vgpu_lock);
+}
+
 /**
  * intel_gvt_destroy_vgpu - destroy a virtual GPU
  * @vgpu: virtual GPU
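
The split gives the vGPU two teardown levels: deactivation only stops scheduling and remains reversible, while release, now called from __intel_vgpu_release() in kvmgt.c per the hunk above, also drops queued workloads and dmabuf objects under vgpu_lock. The layering in a generic sketch (hypothetical names):

	void dev_deactivate(struct dev *d)
	{
		stop_scheduling(d);		/* no state freed; reversible */
	}

	void dev_release(struct dev *d)
	{
		dev_deactivate(d);

		mutex_lock(&d->lock);
		drop_pending_work(d);		/* runtime state goes here */
		drop_exported_buffers(d);
		mutex_unlock(&d->lock);
	}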
@@ -361,6 +379,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	vgpu->gvt = gvt;
 	vgpu->sched_ctl.weight = param->weight;
 	mutex_init(&vgpu->vgpu_lock);
+	mutex_init(&vgpu->dmabuf_lock);
 	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
 	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
 	idr_init(&vgpu->object_idr);