Merge tag 'drm-intel-next-fixes-2017-02-17' of git://anongit.freedesktop.org/git/drm-intel into drm-next

i915 and GVT fixes for v4.11 merge window

* tag 'drm-intel-next-fixes-2017-02-17' of git://anongit.freedesktop.org/git/drm-intel: (32 commits)
  drm/i915: Fix not finding the VBT when it overlaps with OPREGION_ASLE_EXT
  drm/i915: Pass timeout==0 on to i915_gem_object_wait_fence()
  drm/i915/gvt: Disable access to stolen memory as a guest
  drm/i915: Avoid spurious WARNs about the wrong pipe in the PPS code
  drm/i915: Check for timeout completion when waiting for the rq to be submitted
  drm/i915: A hotfix for making aliasing PPGTT work for GVT-g
  drm/i915: Restore context and pd for ringbuffer submission after reset
  drm/i915: Let execlist_update_context() cover !FULL_PPGTT mode.
  drm/i915/lspcon: Fix resume time initialization due to unasserted HPD
  drm/i915/gen9+: Enable hotplug detection early
  drm/i915: Reject set-tiling-ioctl with stride==0 and a tiling mode
  drm/i915: Recreate internal objects with single page segments if dmar fails
  drm/i915/gvt: return error code if dma map iova failed
  drm/i915/gvt: optimize the inhibit context mmio load
  drm/i915/gvt: add sprite plane flip done support.
  drm/i915/gvt: add missing display part reset for vGPU reset
  drm/i915/gvt: Fix shadow context descriptor
  drm/i915/gvt: Fix alignment for GTT allocation
  drm/i915/gvt: fix crash at function release_shadow_wa_ctx
  drm/i915/gvt: enable IOMMU for gvt
  ...
Dave Airlie, 8 years ago
parent commit 601109c5c7

+ 8 - 7
drivers/gpu/drm/i915/gvt/aperture_gm.c

@@ -49,20 +49,21 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 	if (high_gm) {
 		node = &vgpu->gm.high_gm_node;
 		size = vgpu_hidden_sz(vgpu);
-		start = gvt_hidden_gmadr_base(gvt);
-		end = gvt_hidden_gmadr_end(gvt);
+		start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+		end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
 		flags = PIN_HIGH;
 	} else {
 		node = &vgpu->gm.low_gm_node;
 		size = vgpu_aperture_sz(vgpu);
-		start = gvt_aperture_gmadr_base(gvt);
-		end = gvt_aperture_gmadr_end(gvt);
+		start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+		end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
 		flags = PIN_MAPPABLE;
 	}
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
-				  size, 4096, I915_COLOR_UNEVICTABLE,
+				  size, I915_GTT_PAGE_SIZE,
+				  I915_COLOR_UNEVICTABLE,
 				  start, end, flags);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	if (ret)
@@ -254,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	if (request > avail)
 		goto no_enough_resource;
 
-	vgpu_aperture_sz(vgpu) = request;
+	vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
 
 	item = "high GM space";
 	max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
@@ -265,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	if (request > avail)
 		goto no_enough_resource;
 
-	vgpu_hidden_sz(vgpu) = request;
+	vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
 
 	item = "fence";
 	max = gvt_fence_sz(gvt) - HOST_FENCE;
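
For reference, the ALIGN() calls added above round addresses up to a power-of-two boundary. A minimal userspace sketch of that idiom, with ALIGN_UP standing in for the kernel's ALIGN() and 4096 for I915_GTT_PAGE_SIZE:

#include <assert.h>

/* Round x up to the next multiple of a; a must be a power of two. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned long page = 4096UL; /* stands in for I915_GTT_PAGE_SIZE */

    assert(ALIGN_UP(0UL, page) == 0);
    assert(ALIGN_UP(1UL, page) == 4096);
    assert(ALIGN_UP(8192UL, page) == 8192);
    return 0;
}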

+ 19 - 1
drivers/gpu/drm/i915/gvt/cmd_parser.c

@@ -1135,6 +1135,8 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 	u32 dword2 = cmd_val(s, 2);
 	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
 
+	info->plane = PRIMARY_PLANE;
+
 	switch (plane) {
 	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
 		info->pipe = PIPE_A;
@@ -1148,12 +1150,28 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		info->pipe = PIPE_C;
 		info->event = PRIMARY_C_FLIP_DONE;
 		break;
+
+	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
+		info->pipe = PIPE_A;
+		info->event = SPRITE_A_FLIP_DONE;
+		info->plane = SPRITE_PLANE;
+		break;
+	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
+		info->pipe = PIPE_B;
+		info->event = SPRITE_B_FLIP_DONE;
+		info->plane = SPRITE_PLANE;
+		break;
+	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
+		info->pipe = PIPE_C;
+		info->event = SPRITE_C_FLIP_DONE;
+		info->plane = SPRITE_PLANE;
+		break;
+
 	default:
 		gvt_err("unknown plane code %d\n", plane);
 		return -EINVAL;
 	}
 
-	info->pipe = PRIMARY_PLANE;
 	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
 	info->tile_val = (dword1 & GENMASK(2, 0));
 	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
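
The decode above extracts bitfields from the command dwords with GENMASK(). A self-contained sketch of the idiom, with GENMASK written out as in the kernel (32-bit variant) and a hypothetical sample dword:

#include <assert.h>
#include <stdint.h>

/* Contiguous bitmask covering bits l..h inclusive, as in the kernel. */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
    uint32_t dword0 = 0x00000b00; /* hypothetical MI_DISPLAY_FLIP dword 0 */
    uint32_t plane = (dword0 & GENMASK(12, 8)) >> 8;

    assert(plane == 0xb);
    return 0;
}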

+ 24 - 7
drivers/gpu/drm/i915/gvt/display.c

@@ -83,7 +83,7 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 	return 0;
 }
 
-/* EDID with 1024x768 as its resolution */
+/* EDID with 1920x1200 as its resolution */
 static unsigned char virtual_dp_monitor_edid[] = {
 	/*Header*/
 	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
@@ -97,11 +97,16 @@ static unsigned char virtual_dp_monitor_edid[] = {
 	0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
 	/* Established Timings: maximum resolution is 1024x768 */
 	0x21, 0x08, 0x00,
-	/* Standard Timings. All invalid */
-	0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
-	0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
-	/* 18 Byte Data Blocks 1: invalid */
-	0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+	/*
+	 * Standard Timings.
+	 * the following new resolutions are supported:
+	 * 1920x1080, 1280x720, 1280x960, 1280x1024,
+	 * 1440x900, 1600x1200, 1680x1050
+	 */
+	0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
+	0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
+	/* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
+	0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
 	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
 	/* 18 Byte Data Blocks 2: invalid */
 	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
@@ -115,7 +120,7 @@ static unsigned char virtual_dp_monitor_edid[] = {
 	/* Extension Block Count */
 	0x00,
 	/* Checksum */
-	0xef,
+	0x45,
 };
 
 #define DPCD_HEADER_SIZE        0xb
@@ -328,3 +333,15 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu)
 	else
 		return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
 }
+
+/**
+ * intel_vgpu_reset_display - reset vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset the vGPU virtual display emulation state
+ *
+ */
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu)
+{
+	emulate_monitor_status_change(vgpu);
+}
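
The checksum byte changes from 0xef to 0x45 because the last byte of an EDID base block must make all 128 bytes sum to zero mod 256, so any edit to the timing bytes forces a new checksum. A minimal sketch of that computation:

#include <stdint.h>

/* Value for block[127] so the 128-byte EDID block sums to 0 (mod 256). */
uint8_t edid_checksum(const uint8_t block[128])
{
    uint8_t sum = 0;
    int i;

    for (i = 0; i < 127; i++)
        sum += block[i];
    return (uint8_t)(0x100 - sum);
}

int main(void)
{
    uint8_t block[128] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

    block[127] = edid_checksum(block); /* block now sums to 0 mod 256 */
    return 0;
}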

+ 1 - 0
drivers/gpu/drm/i915/gvt/display.h

@@ -158,6 +158,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
 
 int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
 
 #endif

+ 1 - 1
drivers/gpu/drm/i915/gvt/execlist.c

@@ -515,7 +515,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-	if (wa_ctx->indirect_ctx.size == 0)
+	if (!wa_ctx->indirect_ctx.obj)
 		return;
 
 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);

+ 4 - 43
drivers/gpu/drm/i915/gvt/firmware.c

@@ -48,31 +48,6 @@ struct gvt_firmware_header {
 	unsigned char data[1];
 };
 
-#define RD(offset) (readl(mmio + offset.reg))
-#define WR(v, offset) (writel(v, mmio + offset.reg))
-
-static void bdw_forcewake_get(void __iomem *mmio)
-{
-	WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
-
-	RD(ECOBUS);
-
-	if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
-		gvt_err("fail to wait forcewake idle\n");
-
-	WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
-
-	if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
-		gvt_err("fail to wait forcewake ack\n");
-
-	if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
-		      GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
-		gvt_err("fail to wait c0 wake up\n");
-}
-
-#undef RD
-#undef WR
-
 #define dev_to_drm_minor(d) dev_get_drvdata((d))
 
 static ssize_t
@@ -91,9 +66,9 @@ static struct bin_attribute firmware_attr = {
 	.mmap = NULL,
 };
 
-static int expose_firmware_sysfs(struct intel_gvt *gvt,
-					void __iomem *mmio)
+static int expose_firmware_sysfs(struct intel_gvt *gvt)
 {
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 	struct intel_gvt_mmio_info *e;
@@ -132,7 +107,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt,
 
 		for (j = 0; j < e->length; j += 4)
 			*(u32 *)(p + e->offset + j) =
-				readl(mmio + e->offset + j);
+				I915_READ_NOTRACE(_MMIO(e->offset + j));
 	}
 
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
@@ -235,7 +210,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 	struct gvt_firmware_header *h;
 	const struct firmware *fw;
 	char *path;
-	void __iomem *mmio;
 	void *mem;
 	int ret;
 
@@ -260,17 +234,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
 	firmware->mmio = mem;
 
-	mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
-	if (!mmio) {
-		kfree(path);
-		kfree(firmware->cfg_space);
-		kfree(firmware->mmio);
-		return -EINVAL;
-	}
-
-	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
-		bdw_forcewake_get(mmio);
-
 	sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
 		 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
 		 pdev->revision);
@@ -300,13 +263,11 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
 	release_firmware(fw);
 	firmware->firmware_loaded = true;
-	pci_iounmap(pdev, mmio);
 	return 0;
 
 out_free_fw:
 	release_firmware(fw);
 expose_firmware:
-	expose_firmware_sysfs(gvt, mmio);
-	pci_iounmap(pdev, mmio);
+	expose_firmware_sysfs(gvt);
 	return 0;
 }

+ 50 - 20
drivers/gpu/drm/i915/gvt/gtt.c

@@ -606,21 +606,33 @@ struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
 static inline int init_shadow_page(struct intel_vgpu *vgpu,
 		struct intel_vgpu_shadow_page *p, int type)
 {
+	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(kdev, daddr)) {
+		gvt_err("fail to map dma addr\n");
+		return -EINVAL;
+	}
+
 	p->vaddr = page_address(p->page);
 	p->type = type;
 
 	INIT_HLIST_NODE(&p->node);
 
-	p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
-	if (p->mfn == INTEL_GVT_INVALID_ADDR)
-		return -EFAULT;
-
+	p->mfn = daddr >> GTT_PAGE_SHIFT;
 	hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
 	return 0;
 }
 
-static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+static inline void clean_shadow_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_shadow_page *p)
 {
+	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+
+	dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+			PCI_DMA_BIDIRECTIONAL);
+
 	if (!hlist_unhashed(&p->node))
 		hash_del(&p->node);
 }
@@ -670,7 +682,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
 	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
 
-	clean_shadow_page(&spt->shadow_page);
+	clean_shadow_page(spt->vgpu, &spt->shadow_page);
 	intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
 	list_del_init(&spt->post_shadow_list);
 
@@ -1875,8 +1887,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 	int page_entry_num = GTT_PAGE_SIZE >>
 				vgpu->gvt->device_info.gtt_entry_size_shift;
 	void *scratch_pt;
-	unsigned long mfn;
 	int i;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
 
 	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
 		return -EINVAL;
@@ -1887,16 +1900,18 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		return -ENOMEM;
 	}
 
-	mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
-	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
-		free_page((unsigned long)scratch_pt);
-		return -EFAULT;
+	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
+			4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr)) {
+		gvt_err("fail to dmamap scratch_pt\n");
+		__free_page(virt_to_page(scratch_pt));
+		return -ENOMEM;
 	}
-	gtt->scratch_pt[type].page_mfn = mfn;
+	gtt->scratch_pt[type].page_mfn =
+		(unsigned long)(daddr >> GTT_PAGE_SHIFT);
 	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
 	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
-			vgpu->id, type, mfn);
+			vgpu->id, type, gtt->scratch_pt[type].page_mfn);
 
 	/* Build the tree by fully filling the scratch pt with the entries which
 	 * point to the next level scratch pt or scratch page. The
@@ -1930,9 +1945,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 {
 	int i;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
 
 	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
 		if (vgpu->gtt.scratch_pt[i].page != NULL) {
+			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
+					GTT_PAGE_SHIFT);
+			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 			__free_page(vgpu->gtt.scratch_pt[i].page);
 			vgpu->gtt.scratch_pt[i].page = NULL;
 			vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2192,6 +2212,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
 	void *page;
+	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
 
 	gvt_dbg_core("init gtt\n");
 
@@ -2209,14 +2231,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		gvt_err("fail to allocate scratch ggtt page\n");
 		return -ENOMEM;
 	}
-	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
 
-	gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
-	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate scratch ggtt page\n");
-		__free_page(gvt->gtt.scratch_ggtt_page);
-		return -EFAULT;
+	daddr = dma_map_page(dev, virt_to_page(page), 0,
+			4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr)) {
+		gvt_err("fail to dmamap scratch ggtt page\n");
+		__free_page(virt_to_page(page));
+		return -ENOMEM;
 	}
+	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
+	gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
 
 	if (enable_out_of_sync) {
 		ret = setup_spt_oos(gvt);
@@ -2239,6 +2263,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
+					GTT_PAGE_SHIFT);
+
+	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+
 	__free_page(gvt->gtt.scratch_ggtt_page);
 
 	if (enable_out_of_sync)
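
The conversions above replace hypervisor virt-to-mfn translation with the DMA API: map the page, treat the DMA address shifted down by the page shift as the "mfn", and unmap later from that same handle. A sketch of the pairing, assuming a kernel-module context (map_one_page/unmap_one_page are illustrative names; PAGE_SHIFT stands in for GTT_PAGE_SHIFT):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map one page for device DMA and keep a pfn-style handle. */
static int map_one_page(struct device *dev, struct page *page,
                        unsigned long *mfn)
{
    dma_addr_t daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
                                    DMA_BIDIRECTIONAL);

    if (dma_mapping_error(dev, daddr))
        return -ENOMEM;
    *mfn = (unsigned long)(daddr >> PAGE_SHIFT);
    return 0;
}

/* Undo the mapping using the same handle, size and direction. */
static void unmap_one_page(struct device *dev, unsigned long mfn)
{
    dma_unmap_page(dev, (dma_addr_t)mfn << PAGE_SHIFT, PAGE_SIZE,
                   DMA_BIDIRECTIONAL);
}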

+ 0 - 7
drivers/gpu/drm/i915/gvt/gvt.c

@@ -68,8 +68,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
  */
 int intel_gvt_init_host(void)
 {
-	int ret;
-
 	if (intel_gvt_host.initialized)
 		return 0;
 
@@ -96,11 +94,6 @@ int intel_gvt_init_host(void)
 	if (!intel_gvt_host.mpt)
 		return -EINVAL;
 
-	/* Try to detect if we're running in host instead of VM. */
-	ret = intel_gvt_hypervisor_detect_host();
-	if (ret)
-		return -ENODEV;
-
 	gvt_dbg_core("Running with hypervisor %s in host mode\n",
 			supported_hypervisors[intel_gvt_host.hypervisor_type]);
 

+ 0 - 1
drivers/gpu/drm/i915/gvt/hypercall.h

@@ -38,7 +38,6 @@
  * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
  */
 struct intel_gvt_mpt {
-	int (*detect_host)(void);
 	int (*host_init)(struct device *dev, void *gvt, const void *ops);
 	void (*host_exit)(struct device *dev, void *gvt);
 	int (*attach_vgpu)(void *vgpu, unsigned long *handle);

+ 15 - 42
drivers/gpu/drm/i915/gvt/interrupt.c

@@ -176,26 +176,15 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
-	u32 changed, masked, unmasked;
 	u32 imr = *(u32 *)p_data;
 
-	gvt_dbg_irq("write IMR %x with val %x\n",
-		reg, imr);
-
-	gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
-
-	/* figure out newly masked/unmasked bits */
-	changed = vgpu_vreg(vgpu, reg) ^ imr;
-	masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
-	unmasked = masked ^ changed;
-
-	gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
-		changed, masked, unmasked);
+	gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n",
+		    reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr);
 
 	vgpu_vreg(vgpu, reg) = imr;
 
 	ops->check_pending_irq(vgpu);
-	gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+
 	return 0;
 }
 
@@ -217,14 +206,11 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
-	u32 changed, enabled, disabled;
 	u32 ier = *(u32 *)p_data;
 	u32 virtual_ier = vgpu_vreg(vgpu, reg);
 
-	gvt_dbg_irq("write master irq reg %x with val %x\n",
-		reg, ier);
-
-	gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+	gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n",
+		    reg, ier, virtual_ier, virtual_ier ^ ier);
 
 	/*
 	 * GEN8_MASTER_IRQ is a special irq register,
@@ -236,16 +222,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
 	vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
 	vgpu_vreg(vgpu, reg) |= ier;
 
-	/* figure out newly enabled/disable bits */
-	changed = virtual_ier ^ ier;
-	enabled = (virtual_ier & changed) ^ changed;
-	disabled = enabled ^ changed;
-
-	gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
-			changed, enabled, disabled);
-
 	ops->check_pending_irq(vgpu);
-	gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+
 	return 0;
 }
 
@@ -268,21 +246,11 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
 	struct intel_gvt_irq_info *info;
-	u32 changed, enabled, disabled;
 	u32 ier = *(u32 *)p_data;
 
-	gvt_dbg_irq("write IER %x with val %x\n",
-		reg, ier);
-
-	gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
+	gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n",
+		    reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier);
 
-	/* figure out newly enabled/disable bits */
-	changed = vgpu_vreg(vgpu, reg) ^ ier;
-	enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
-	disabled = enabled ^ changed;
-
-	gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
-			changed, enabled, disabled);
 	vgpu_vreg(vgpu, reg) = ier;
 
 	info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
@@ -293,7 +261,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 		update_upstream_irq(vgpu, info);
 
 	ops->check_pending_irq(vgpu);
-	gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+
 	return 0;
 }
 
@@ -317,7 +285,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
 		iir_to_regbase(reg));
 	u32 iir = *(u32 *)p_data;
 
-	gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+	gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n",
+		    reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir);
 
 	if (WARN_ON(!info))
 		return -EINVAL;
@@ -619,6 +588,10 @@ static void gen8_init_irq(
 		SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
 		SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
 		SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+		SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+		SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+		SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
 	}
 
 	/* GEN8 interrupt PCU events */

+ 52 - 56
drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -77,7 +77,7 @@ struct kvmgt_guest_info {
 struct gvt_dma {
 	struct rb_node node;
 	gfn_t gfn;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 
+static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
+		unsigned long *iova)
+{
+	struct page *page;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	page = pfn_to_page(pfn);
+	if (is_error_page(page))
+		return -EFAULT;
+
+	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
+			PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr))
+		return -ENOMEM;
+
+	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
+	return 0;
+}
+
+static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+{
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
+	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
 static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct rb_node *node = vgpu->vdev.cache.rb_node;
@@ -111,21 +140,22 @@ out:
 	return ret;
 }
 
-static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 
 	entry = __gvt_cache_find(vgpu, gfn);
-	pfn = (entry == NULL) ? 0 : entry->pfn;
+	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
 
 	mutex_unlock(&vgpu->vdev.cache_lock);
-	return pfn;
+	return iova;
 }
 
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+		unsigned long iova)
 {
 	struct gvt_dma *new, *itr;
 	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
@@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
 		return;
 
 	new->gfn = gfn;
-	new->pfn = pfn;
+	new->iova = iova;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while (*link) {
@@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 	}
 
 	g1 = gfn;
+	gvt_dma_unmap_iova(vgpu, this->iova);
 	rc = vfio_unpin_pages(dev, &g1, 1);
 	WARN_ON(rc != 1);
 	__gvt_cache_remove_entry(vgpu, this);
@@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while ((node = rb_first(&vgpu->vdev.cache))) {
 		dma = rb_entry(node, struct gvt_dma, node);
+		gvt_dma_unmap_iova(vgpu, dma->iova);
 		gfn = dma->gfn;
 
 		vfio_unpin_pages(dev, &gfn, 1);
@@ -965,11 +997,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 			sparse->areas[0].offset =
 					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
 			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
-			if (!caps.buf) {
-				kfree(caps.buf);
-				caps.buf = NULL;
-				caps.size = 0;
-			}
 			break;
 
 		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
@@ -1248,43 +1275,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static bool kvmgt_check_guest(void)
-{
-	unsigned int eax, ebx, ecx, edx;
-	char s[12];
-	unsigned int *i;
-
-	eax = KVM_CPUID_SIGNATURE;
-	ebx = ecx = edx = 0;
-
-	asm volatile ("cpuid"
-		      : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
-		      :
-		      : "cc", "memory");
-	i = (unsigned int *)s;
-	i[0] = ebx;
-	i[1] = ecx;
-	i[2] = edx;
-
-	return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
-}
-
-/**
- * NOTE:
- * It's actually impossible to check if we are running in KVM host,
- * since the "KVM host" is simply native. So we only dectect guest here.
- */
-static int kvmgt_detect_host(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
-	if (intel_iommu_gfx_mapped) {
-		gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
-		return -ENODEV;
-	}
-#endif
-	return kvmgt_check_guest() ? -ENODEV : 0;
-}
-
 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
 {
 	struct intel_vgpu *itr;
@@ -1390,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 
 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 {
-	unsigned long pfn;
+	unsigned long iova, pfn;
 	struct kvmgt_guest_info *info;
 	struct device *dev;
 	int rc;
@@ -1399,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		return INTEL_GVT_INVALID_ADDR;
 
 	info = (struct kvmgt_guest_info *)handle;
-	pfn = gvt_cache_find(info->vgpu, gfn);
-	if (pfn != 0)
-		return pfn;
+	iova = gvt_cache_find(info->vgpu, gfn);
+	if (iova != INTEL_GVT_INVALID_ADDR)
+		return iova;
 
 	pfn = INTEL_GVT_INVALID_ADDR;
 	dev = mdev_dev(info->vgpu->vdev.mdev);
@@ -1410,9 +1400,16 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
 		return INTEL_GVT_INVALID_ADDR;
 	}
+	/* transfer to host iova for GFX to use DMA */
+	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
+	if (rc) {
+		gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+		vfio_unpin_pages(dev, &gfn, 1);
+		return INTEL_GVT_INVALID_ADDR;
+	}
 
-	gvt_cache_add(info->vgpu, gfn, pfn);
-	return pfn;
+	gvt_cache_add(info->vgpu, gfn, iova);
+	return iova;
 }
 
 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
@@ -1459,7 +1456,6 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
 }
 
 struct intel_gvt_mpt kvmgt_mpt = {
-	.detect_host = kvmgt_detect_host,
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
 	.attach_vgpu = kvmgt_attach_vgpu,
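
Switching the cache from pfn to iova also changes the "not found" sentinel: iova 0 is a legal DMA address, so lookups now return INTEL_GVT_INVALID_ADDR on a miss instead of 0. A minimal userspace sketch of that sentinel pattern (all names hypothetical):

#include <assert.h>

#define INVALID_ADDR (~0UL) /* stands in for INTEL_GVT_INVALID_ADDR */

/* One-entry cache: since 0 is a valid iova, it cannot mean "miss". */
static unsigned long cached_gfn = INVALID_ADDR;
static unsigned long cached_iova;

static unsigned long cache_find(unsigned long gfn)
{
    return (gfn == cached_gfn) ? cached_iova : INVALID_ADDR;
}

int main(void)
{
    cached_gfn = 0x42;
    cached_iova = 0; /* a perfectly valid mapping at iova 0 */

    assert(cache_find(0x42) == 0);
    assert(cache_find(0x43) == INVALID_ADDR);
    return 0;
}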

+ 0 - 12
drivers/gpu/drm/i915/gvt/mpt.h

@@ -43,18 +43,6 @@
  * hypervisor.
  */
 
-/**
- * intel_gvt_hypervisor_detect_host - check if GVT-g is running within
- * hypervisor host/privilged domain
- *
- * Returns:
- * Zero on success, -ENODEV if current kernel is running inside a VM
- */
-static inline int intel_gvt_hypervisor_detect_host(void)
-{
-	return intel_gvt_host.mpt->detect_host();
-}
-
 /**
  * intel_gvt_hypervisor_host_init - init GVT-g host side
  *

+ 17 - 0
drivers/gpu/drm/i915/gvt/render.c

@@ -236,12 +236,18 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 	}
 }
 
+#define CTX_CONTEXT_CONTROL_VAL	0x03
+
 void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct render_mmio *mmio;
 	u32 v;
 	int i, array_size;
+	u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
+	u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+	u32 inhibit_mask =
+		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
 	if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
 		mmio = gen9_render_mmio_list;
@@ -257,6 +263,17 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 			continue;
 
 		mmio->value = I915_READ(mmio->reg);
+
+		/*
+		 * If this is an inhibit context, load the in_context
+		 * mmio into the HW by an mmio write. If it is not,
+		 * skip this mmio write.
+		 */
+		if (mmio->in_context &&
+				((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
+				i915.enable_execlists)
+			continue;
+
 		if (mmio->mask)
 			v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
 		else
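
The inhibit test relies on i915's masked-bit register convention: the upper 16 bits of a 32-bit write select which bits take effect and the lower 16 bits carry the values, so _MASKED_BIT_ENABLE(x) sets a bit in both halves. A self-contained sketch (macros written out; the bit position is hypothetical):

#include <assert.h>
#include <stdint.h>

/* i915-style masked bits: write-enable mask in 31:16, values in 15:0. */
#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

int main(void)
{
    uint32_t inhibit = 1u << 3; /* hypothetical RESTORE_INHIBIT bit */
    uint32_t mask = MASKED_BIT_ENABLE(inhibit);
    uint32_t ctx_ctrl;

    ctx_ctrl = MASKED_BIT_ENABLE(inhibit);  /* bit set in the context image */
    assert((ctx_ctrl & mask) == mask);      /* the test used in the patch */

    ctx_ctrl = MASKED_BIT_DISABLE(inhibit); /* bit cleared */
    assert((ctx_ctrl & mask) != mask);
    return 0;
}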

+ 0 - 1
drivers/gpu/drm/i915/gvt/sched_policy.c

@@ -125,7 +125,6 @@ static void tbs_sched_func(struct work_struct *work)
 		vgpu_data = scheduler->current_vgpu->sched_data;
 		head = &vgpu_data->list;
 	} else {
-		gvt_dbg_sched("no current vgpu search from q head\n");
 		head = &sched_data->runq_head;
 	}
 

+ 3 - 2
drivers/gpu/drm/i915/gvt/scheduler.c

@@ -169,7 +169,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
 		ring_id, workload);
 
-	shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
 				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
@@ -456,7 +457,7 @@ static int workload_thread(void *priv)
 		}
 
 complete:
-		gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+		gvt_dbg_sched("will complete workload %p, status: %d\n",
 				workload, workload->status);
 
 		if (workload->req)

+ 8 - 6
drivers/gpu/drm/i915/gvt/vgpu.c

@@ -74,7 +74,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 {
 	unsigned int num_types;
-	unsigned int i, low_avail;
+	unsigned int i, low_avail, high_avail;
 	unsigned int min_low;
 
 	/* vGPU type name is defined as GVTg_Vx_y which contains
@@ -89,9 +89,9 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 	 * to indicate how many vGPU instance can be created for this
 	 * type.
 	 *
-	 * Currently use static size here as we init type earlier..
 	 */
-	low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
+	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
 	num_types = 4;
 
 	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
@@ -106,7 +106,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		gvt->types[i].low_gm_size = min_low;
 		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
 		gvt->types[i].fence = 4;
-		gvt->types[i].max_instance = low_avail / min_low;
+		gvt->types[i].max_instance = min(low_avail / min_low,
+						 high_avail / gvt->types[i].high_gm_size);
 		gvt->types[i].avail_instance = gvt->types[i].max_instance;
 
 		if (IS_GEN8(gvt->dev_priv))
@@ -142,9 +143,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 	/* Need to depend on maximum hw resource size but keep on
 	 * static config for now.
 	 */
-	low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
+	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
 		gvt->gm.vgpu_allocated_low_gm_size;
-	high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
+	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
 		gvt->gm.vgpu_allocated_high_gm_size;
 	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
 		gvt->fence.vgpu_allocated_fence_num;
@@ -384,6 +385,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		intel_vgpu_reset_resource(vgpu);
 		intel_vgpu_reset_mmio(vgpu);
 		populate_pvinfo_page(vgpu);
+		intel_vgpu_reset_display(vgpu);
 
 		if (dmlr)
 			intel_vgpu_reset_cfg_space(vgpu);

+ 7 - 7
drivers/gpu/drm/i915/i915_drv.c

@@ -824,10 +824,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	if (ret < 0)
 		return ret;
 
-	ret = intel_gvt_init(dev_priv);
-	if (ret < 0)
-		goto err_workqueues;
-
 	/* This must be called before any calls to HAS_PCH_* */
 	intel_detect_pch(dev_priv);
 
@@ -841,7 +837,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	intel_init_audio_hooks(dev_priv);
 	ret = i915_gem_load_init(dev_priv);
 	if (ret < 0)
-		goto err_gvt;
+		goto err_workqueues;
 
 	intel_display_crc_init(dev_priv);
 
@@ -853,8 +849,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
 	return 0;
 
-err_gvt:
-	intel_gvt_cleanup(dev_priv);
 err_workqueues:
 	i915_workqueues_cleanup(dev_priv);
 	return ret;
@@ -1077,6 +1071,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 			DRM_DEBUG_DRIVER("can't enable MSI");
 	}
 
+	ret = intel_gvt_init(dev_priv);
+	if (ret)
+		goto out_ggtt;
+
 	return 0;
 
 out_ggtt:
@@ -1290,6 +1288,8 @@ void i915_driver_unload(struct drm_device *dev)
 
 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
+	intel_gvt_cleanup(dev_priv);
+
 	i915_driver_unregister(dev_priv);
 
 	drm_vblank_cleanup(dev);

+ 9 - 13
drivers/gpu/drm/i915/i915_gem.c

@@ -440,7 +440,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 			timeout = i915_gem_object_wait_fence(shared[i],
 							     flags, timeout,
 							     rps);
-			if (timeout <= 0)
+			if (timeout < 0)
 				break;
 
 			dma_fence_put(shared[i]);
@@ -453,7 +453,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 		excl = reservation_object_get_excl_rcu(resv);
 	}
 
-	if (excl && timeout > 0)
+	if (excl && timeout >= 0)
 		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
 
 	dma_fence_put(excl);
@@ -2735,21 +2735,17 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 		engine->irq_seqno_barrier(engine);
 
 	request = i915_gem_find_active_request(engine);
-	if (!request)
-		return;
-
-	if (!i915_gem_reset_request(request))
-		return;
+	if (request && i915_gem_reset_request(request)) {
+		DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
+				 engine->name, request->global_seqno);
 
-	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
-			 engine->name, request->global_seqno);
+		/* If this context is now banned, skip all pending requests. */
+		if (i915_gem_context_is_banned(request->ctx))
+			engine_skip_context(request);
+	}
 
 	/* Setup the CS to resume from the breadcrumb of the hung request */
 	engine->reset_hw(engine, request);
-
-	/* If this context is now banned, skip all of its pending requests. */
-	if (i915_gem_context_is_banned(request->ctx))
-		engine_skip_context(request);
 }
 
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
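
The relaxed comparisons follow the kernel's wait-timeout convention: a negative return is an error, zero means the timeout expired (not an error for an opportunistic wait), and a positive value is the time remaining, so only negative values should abort the loop. A sketch of that convention with a hypothetical wait_one():

#include <stdio.h>

/*
 * Hypothetical fence wait following the kernel convention:
 * < 0 on error, 0 if the timeout expired, otherwise time remaining.
 */
static long wait_one(long timeout)
{
    return timeout > 10 ? timeout - 10 : 0; /* each wait eats 10 units */
}

int main(void)
{
    long timeout = 25;
    int i;

    for (i = 0; i < 4 && timeout >= 0; i++) /* only < 0 aborts early */
        timeout = wait_one(timeout);

    printf("final timeout: %ld\n", timeout); /* 0: expired, not an error */
    return 0;
}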

+ 4 - 3
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -755,9 +755,10 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
 	GEM_BUG_ON(pte_end > GEN8_PTES);
 
 	bitmap_clear(pt->used_ptes, pte, num_entries);
-
-	if (bitmap_empty(pt->used_ptes, GEN8_PTES))
-		return true;
+	if (USES_FULL_PPGTT(vm->i915)) {
+		if (bitmap_empty(pt->used_ptes, GEN8_PTES))
+			return true;
+	}
 
 	pt_vaddr = kmap_px(pt);
 

+ 23 - 14
drivers/gpu/drm/i915/i915_gem_internal.c

@@ -46,24 +46,12 @@ static struct sg_table *
 i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	unsigned int npages = obj->base.size / PAGE_SIZE;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	unsigned int npages;
 	int max_order;
 	gfp_t gfp;
 
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (!st)
-		return ERR_PTR(-ENOMEM);
-
-	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
-		kfree(st);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	sg = st->sgl;
-	st->nents = 0;
-
 	max_order = MAX_ORDER;
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
@@ -77,6 +65,20 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		gfp |= __GFP_DMA32;
 	}
 
+create_st:
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	npages = obj->base.size / PAGE_SIZE;
+	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sg = st->sgl;
+	st->nents = 0;
+
 	do {
 		int order = min(fls(npages) - 1, max_order);
 		struct page *page;
@@ -104,8 +106,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		sg = __sg_next(sg);
 	} while (1);
 
-	if (i915_gem_gtt_prepare_pages(obj, st))
+	if (i915_gem_gtt_prepare_pages(obj, st)) {
+		/* Failed to dma-map; try again with single-page sg segments */
+		if (get_order(st->sgl->length)) {
+			internal_free_pages(st);
+			max_order = 0;
+			goto create_st;
+		}
 		goto err;
+	}
 
 	/* Mark the pages as dontneed whilst they are still pinned. As soon
 	 * as they are unpinned they are allowed to be reaped by the shrinker,
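
The retry path works because npages and the sg_table are rebuilt from scratch with max_order forced to 0, so every scatterlist segment becomes a single page that the DMA remapper can map. The shape of that fallback as a minimal sketch (try_alloc is a hypothetical stand-in for the allocate-then-dma-map step):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical step: pretend only single-page segments dma-map cleanly. */
static bool try_alloc(int max_order)
{
    return max_order == 0;
}

int main(void)
{
    int max_order = 10; /* start with the largest segments */

retry:
    if (!try_alloc(max_order)) {
        if (max_order) { /* fall back to single-page segments */
            max_order = 0;
            goto retry;
        }
        return 1; /* even order-0 failed: give up */
    }
    printf("allocated with max_order=%d\n", max_order);
    return 0;
}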

+ 6 - 1
drivers/gpu/drm/i915/i915_gem_request.c

@@ -1025,8 +1025,13 @@ __i915_request_wait_for_execute(struct drm_i915_gem_request *request,
 			break;
 		}
 
+		if (!timeout) {
+			timeout = -ETIME;
+			break;
+		}
+
 		timeout = io_schedule_timeout(timeout);
-	} while (timeout);
+	} while (1);
 	finish_wait(&request->execute.wait, &wait);
 
 	if (flags & I915_WAIT_LOCKED)

+ 5 - 0
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -405,6 +405,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 
 	mutex_init(&dev_priv->mm.stolen_lock);
 
+	if (intel_vgpu_active(dev_priv)) {
+		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
+		return 0;
+	}
+
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
 		DRM_INFO("DMAR active, disabling use of stolen memory\n");

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_tiling.c

@@ -173,7 +173,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
 	else
 		tile_width = 512;
 
-	if (!IS_ALIGNED(stride, tile_width))
+	if (!stride || !IS_ALIGNED(stride, tile_width))
 		return false;
 
 	/* 965+ just needs multiples of tile width */

+ 50 - 19
drivers/gpu/drm/i915/i915_irq.c

@@ -3123,19 +3123,16 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 }
 
-static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
-	u32 hotplug_irqs, hotplug, enabled_irqs;
-
-	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
-	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
-
-	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+	u32 hotplug;
 
 	/* Enable digital hotplug on the PCH */
 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
-	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
-		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
+	hotplug |= PORTA_HOTPLUG_ENABLE |
+		   PORTB_HOTPLUG_ENABLE |
+		   PORTC_HOTPLUG_ENABLE |
+		   PORTD_HOTPLUG_ENABLE;
 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 
 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
@@ -3143,6 +3140,18 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
 }
 
+static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+	u32 hotplug_irqs, enabled_irqs;
+
+	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
+
+	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+	spt_hpd_detection_setup(dev_priv);
+}
+
 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
 	u32 hotplug_irqs, hotplug, enabled_irqs;
@@ -3177,18 +3186,15 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	ibx_hpd_irq_setup(dev_priv);
 }
 
-static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
+				      u32 enabled_irqs)
 {
-	u32 hotplug_irqs, hotplug, enabled_irqs;
-
-	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
-	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
-
-	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+	u32 hotplug;
 
 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
-	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
-		PORTA_HOTPLUG_ENABLE;
+	hotplug |= PORTA_HOTPLUG_ENABLE |
+		   PORTB_HOTPLUG_ENABLE |
+		   PORTC_HOTPLUG_ENABLE;
 
 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
 		      hotplug, enabled_irqs);
@@ -3198,7 +3204,6 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	 * For BXT invert bit has to be set based on AOB design
 	 * for HPD detection logic, update it based on VBT fields.
 	 */
-
 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
 		hotplug |= BXT_DDIA_HPD_INVERT;
@@ -3212,6 +3217,23 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 }
 
+static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
+}
+
+static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+	u32 hotplug_irqs, enabled_irqs;
+
+	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
+	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
+
+	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+
+	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
+}
+
 static void ibx_irq_postinstall(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3227,6 +3249,12 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 
 	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
 	I915_WRITE(SDEIMR, ~mask);
+
+	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
+	    HAS_PCH_LPT(dev_priv))
+		; /* TODO: Enable HPD detection on older PCH platforms too */
+	else
+		spt_hpd_detection_setup(dev_priv);
 }
 
 static void gen5_gt_irq_postinstall(struct drm_device *dev)
@@ -3438,6 +3466,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 
 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
 	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+
+	if (IS_GEN9_LP(dev_priv))
+		bxt_hpd_detection_setup(dev_priv);
 }
 
 static int gen8_irq_postinstall(struct drm_device *dev)

+ 4 - 2
drivers/gpu/drm/i915/i915_reg.h

@@ -3307,8 +3307,10 @@ enum skl_disp_power_wells {
 /*
  * Logical Context regs
  */
-#define CCID			_MMIO(0x2180)
-#define   CCID_EN		(1<<0)
+#define CCID				_MMIO(0x2180)
+#define   CCID_EN			BIT(0)
+#define   CCID_EXTENDED_STATE_RESTORE	BIT(2)
+#define   CCID_EXTENDED_STATE_SAVE	BIT(3)
 /*
  * Notes on SNB/IVB/VLV context size:
  * - Power context is saved elsewhere (LLC or stolen)

+ 5 - 5
drivers/gpu/drm/i915/intel_dp.c

@@ -2887,6 +2887,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 
 	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
 
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return;
+
 	edp_panel_vdd_off_sync(intel_dp);
 
 	/*
@@ -2914,9 +2917,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
-		return;
-
 	for_each_intel_encoder(dev, encoder) {
 		struct intel_dp *intel_dp;
 		enum port port;
@@ -4406,8 +4406,8 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
  *
  * Return %true if @port is connected, %false otherwise.
  */
-static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
-					 struct intel_digital_port *port)
+bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+				  struct intel_digital_port *port)
 {
 	if (HAS_PCH_IBX(dev_priv))
 		return ibx_digital_port_connected(dev_priv, port);

+ 2 - 0
drivers/gpu/drm/i915/intel_drv.h

@@ -1485,6 +1485,8 @@ bool __intel_dp_read_desc(struct intel_dp *intel_dp,
 bool intel_dp_read_desc(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+				  struct intel_digital_port *port);
 
 /* intel_dp_aux_backlight.c */
 int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);

+ 5 - 0
drivers/gpu/drm/i915/intel_gvt.c

@@ -67,6 +67,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 		return 0;
 	}
 
+	if (intel_vgpu_active(dev_priv)) {
+		DRM_DEBUG_DRIVER("GVT-g is disabled for guest\n");
+		goto bail;
+	}
+
 	if (!is_supported_device(dev_priv)) {
 		DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
 		goto bail;

+ 17 - 2
drivers/gpu/drm/i915/intel_lrc.c

@@ -360,7 +360,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 static u64 execlists_update_context(struct drm_i915_gem_request *rq)
 {
 	struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
-	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	struct i915_hw_ppgtt *ppgtt =
+		rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
 	u32 *reg_state = ce->lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
@@ -1389,7 +1390,20 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct execlist_port *port = engine->execlist_port;
-	struct intel_context *ce = &request->ctx->engine[engine->id];
+	struct intel_context *ce;
+
+	/* If the request was innocent, we leave the request in the ELSP
+	 * and will try to replay it on restarting. The context image may
+	 * have been corrupted by the reset, in which case we may have
+	 * to service a new GPU hang, but more likely we can continue on
+	 * without impact.
+	 *
+	 * If the request was guilty, we presume the context is corrupt
+	 * and have to at least restore the RING register in the context
+	 * image back to the expected values to skip over the guilty request.
+	 */
+	if (!request || request->fence.error != -EIO)
+		return;
 
 	/* We want a simple context + ring to execute the breadcrumb update.
 	 * We cannot rely on the context being intact across the GPU hang,
@@ -1398,6 +1412,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * future request will be after userspace has had the opportunity
 	 * to recreate its own state.
 	 */
+	ce = &request->ctx->engine[engine->id];
 	execlists_init_reg_state(ce->lrc_reg_state,
 				 request->ctx, engine, ce->ring);
 

+ 4 - 1
drivers/gpu/drm/i915/intel_lspcon.c

@@ -158,6 +158,8 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
 static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
 {
 	struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 	unsigned long start = jiffies;
 
 	if (!lspcon->desc_valid)
@@ -173,7 +175,8 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
 		if (!__intel_dp_read_desc(intel_dp, &desc))
 			return;
 
-		if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
+		if (intel_digital_port_connected(dev_priv, dig_port) &&
+		    !memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
 			DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
 				      jiffies_to_msecs(jiffies - start));
 			return;

+ 12 - 1
drivers/gpu/drm/i915/intel_opregion.c

@@ -982,7 +982,18 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 			opregion->vbt_size = vbt_size;
 		} else {
 			vbt = base + OPREGION_VBT_OFFSET;
-			vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET;
+			/*
+			 * The VBT specification says that if the ASLE ext
+			 * mailbox is not used its area is reserved, but
+			 * on some CHT boards the VBT extends into the
+			 * ASLE ext area. Allow this even though it is
+			 * against the spec, so we do not end up rejecting
+			 * the VBT on those boards (and end up not finding the
+			 * LCD panel because of this).
+			 */
+			vbt_size = (mboxes & MBOX_ASLE_EXT) ?
+				OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
+			vbt_size -= OPREGION_VBT_OFFSET;
 			if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
 				DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
 				opregion->vbt = vbt;
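
With the usual OpRegion layout (VBT mailbox at 0x400, ASLE ext mailbox at 0x1c00, total size 0x2000 — offsets assumed here to match i915, and the MBOX_ASLE_EXT bit value is likewise an assumption), the accepted VBT size grows from 0x1800 to 0x1c00 bytes when the ASLE ext mailbox is unused:

#include <assert.h>
#include <stdint.h>

/* OpRegion offsets as used by i915 (assumed for illustration). */
#define OPREGION_VBT_OFFSET      0x400
#define OPREGION_ASLE_EXT_OFFSET 0x1c00
#define OPREGION_SIZE            0x2000
#define MBOX_ASLE_EXT            (1 << 4) /* assumed flag bit */

static uint32_t vbt_size(uint32_t mboxes)
{
    uint32_t end = (mboxes & MBOX_ASLE_EXT) ?
        OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
    return end - OPREGION_VBT_OFFSET;
}

int main(void)
{
    assert(vbt_size(MBOX_ASLE_EXT) == 0x1800); /* old, spec-strict limit */
    assert(vbt_size(0) == 0x1c00); /* VBT may spill into the ASLE ext area */
    return 0;
}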

+ 55 - 3
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -599,10 +599,62 @@ out:
 static void reset_ring_common(struct intel_engine_cs *engine,
 			      struct drm_i915_gem_request *request)
 {
-	struct intel_ring *ring = request->ring;
+	/* Try to restore the logical GPU state to match the continuation
+	 * of the request queue. If we skip the context/PD restore, then
+	 * the next request may try to execute assuming that its context
+	 * is valid and loaded on the GPU and so may try to access invalid
+	 * memory, prompting repeated GPU hangs.
+	 *
+	 * If the request was guilty, we still restore the logical state
+	 * in case the next request requires it (e.g. the aliasing ppgtt),
+	 * but skip over the hung batch.
+	 *
+	 * If the request was innocent, we try to replay the request with
+	 * the restored context.
+	 */
+	if (request) {
+		struct drm_i915_private *dev_priv = request->i915;
+		struct intel_context *ce = &request->ctx->engine[engine->id];
+		struct i915_hw_ppgtt *ppgtt;
+
+		/* FIXME consider gen8 reset */
+
+		if (ce->state) {
+			I915_WRITE(CCID,
+				   i915_ggtt_offset(ce->state) |
+				   BIT(8) /* must be set! */ |
+				   CCID_EXTENDED_STATE_SAVE |
+				   CCID_EXTENDED_STATE_RESTORE |
+				   CCID_EN);
+		}
 
-	ring->head = request->postfix;
-	ring->last_retired_head = -1;
+		ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
+		if (ppgtt) {
+			u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
+
+			I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+			I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
+
+			/* Wait for the PD reload to complete */
+			if (intel_wait_for_register(dev_priv,
+						    RING_PP_DIR_BASE(engine),
+						    BIT(0), 0,
+						    10))
+				DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
+
+			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+		}
+
+		/* If the rq hung, jump to its breadcrumb and skip the batch */
+		if (request->fence.error == -EIO) {
+			struct intel_ring *ring = request->ring;
+
+			ring->head = request->postfix;
+			ring->last_retired_head = -1;
+		}
+	} else {
+		engine->legacy_active_context = NULL;
+	}
 }
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)