Merge tag 'gvt-next-2017-11-16' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2017-11-16

- CSB HWSP update support (Weinan)
- GVT debug helpers, dyndbg and debugfs (Chuanxiao, Shuo)
- Fully virtualized opregion (Xiaolin)
- VM health check for sane fallback (Fred)
- Workload submission code refactor for future enabling (Zhi)
- Updated repo URL in MAINTAINERS (Zhenyu)
- Many other misc fixes

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171116092007.ww5bvfx7rf36bjmn@zhen-hp.sh.intel.com
Rodrigo Vivi, 7 years ago (commit 3dd435ef69)
+ 1 - 1
MAINTAINERS

@@ -6928,7 +6928,7 @@ M:	Zhi Wang <zhi.a.wang@intel.com>
 L:	intel-gvt-dev@lists.freedesktop.org
 L:	intel-gfx@lists.freedesktop.org
 W:	https://01.org/igvt-g
-T:	git https://github.com/01org/gvt-linux.git
+T:	git https://github.com/intel/gvt-linux.git
 S:	Supported
 F:	drivers/gpu/drm/i915/gvt/
 

+ 1 - 1
drivers/gpu/drm/i915/gvt/Makefile

@@ -1,7 +1,7 @@
 GVT_DIR := gvt
 GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
 	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
-	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
+	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o
 
 ccflags-y				+= -I$(src) -I$(src)/$(GVT_DIR)
 i915-y					+= $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))

+ 21 - 0
drivers/gpu/drm/i915/gvt/cfg_space.c

@@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+	u32 new = *(u32 *)(p_data);
+
+	if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
+		/* We don't have a ROM, so return a size of 0. */
+		*pval = 0;
+	else
+		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
+	return 0;
+}
+
 static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
 	void *p_data, unsigned int bytes)
 {
@@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 	}
 
 	switch (rounddown(offset, 4)) {
+	case PCI_ROM_ADDRESS:
+		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+			return -EINVAL;
+		return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
+
 	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
 		if (WARN_ON(!IS_ALIGNED(offset, 4)))
 			return -EINVAL;
@@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
 				pci_resource_len(gvt->dev_priv->drm.pdev, 0);
 	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
 				pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+
+	memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
 }
 
 /**
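
Editorial note: this hunk implements the standard PCI BAR sizing handshake for the expansion ROM BAR. A guest sizes a BAR by writing all 1s and reading the value back; the bits that stay zero encode the size. Since the vGPU exposes no option ROM, a write of the mask pattern stores 0, so the guest computes a zero-sized (absent) ROM. A minimal guest-side sketch of that handshake, assuming hypothetical cfg_read32()/cfg_write32() config-space accessors that are not part of this patch:

    /* Guest-side view of the ROM BAR sizing handshake emulated above. */
    static u32 probe_rom_bar_size(void)
    {
            u32 saved = cfg_read32(PCI_ROM_ADDRESS);
            u32 sized;

            cfg_write32(PCI_ROM_ADDRESS, 0xffffffff);
            sized = cfg_read32(PCI_ROM_ADDRESS);    /* this vGPU returns 0 */
            cfg_write32(PCI_ROM_ADDRESS, saved);

            if (!(sized & PCI_ROM_ADDRESS_MASK))    /* mask is 0xfffff800 */
                    return 0;                       /* no option ROM */
            return ~(sized & PCI_ROM_ADDRESS_MASK) + 1;
    }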

+ 124 - 101
drivers/gpu/drm/i915/gvt/cmd_parser.c

@@ -709,18 +709,13 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
 
 	print_opcode(cmd_val(s, 0), s->ring_id);
 
-	/* print the whole page to trace */
-	pr_err("    ip_va=%p: %08x %08x %08x %08x\n",
-			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
-			cmd_val(s, 2), cmd_val(s, 3));
-
 	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
 
 	while (cnt < 1024) {
-		pr_err("ip_va=%p: ", s->ip_va);
+		gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
 		for (i = 0; i < 8; i++)
-			pr_err("%08x ", cmd_val(s, i));
-		pr_err("\n");
+			gvt_dbg_cmd("%08x ", cmd_val(s, i));
+		gvt_dbg_cmd("\n");
 
 		s->ip_va += 8 * sizeof(u32);
 		cnt += 8;
@@ -825,7 +820,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
 	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
 		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
 			offset, data);
-		return -EINVAL;
+		return -EPERM;
 	}
 	return 0;
 }
@@ -839,7 +834,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	if (offset + 4 > gvt->device_info.mmio_size) {
 		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
 				cmd, offset);
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
@@ -854,8 +849,8 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	}
 
 	if (is_force_nonpriv_mmio(offset) &&
-	    force_nonpriv_reg_handler(s, offset, index))
-		return -EINVAL;
+		force_nonpriv_reg_handler(s, offset, index))
+		return -EPERM;
 
 	if (offset == i915_mmio_reg_offset(DERRMR) ||
 		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
@@ -894,11 +889,14 @@ static int cmd_handler_lri(struct parser_exec_state *s)
 					i915_mmio_reg_offset(DERRMR))
 				ret |= 0;
 			else
-				ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+				ret |= (cmd_reg_inhibit(s, i)) ?
+					-EBADRQC : 0;
 		}
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+		if (ret)
+			break;
 	}
 	return ret;
 }
@@ -912,11 +910,15 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
 		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
 			ret |= ((cmd_reg_inhibit(s, i) ||
 					(cmd_reg_inhibit(s, i + 1)))) ?
-				-EINVAL : 0;
+				-EBADRQC : 0;
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+		if (ret)
+			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+		if (ret)
+			break;
 	}
 	return ret;
 }
@@ -934,15 +936,19 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
 
 	for (i = 1; i < cmd_len;) {
 		if (IS_BROADWELL(gvt->dev_priv))
-			ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+		if (ret)
+			break;
 		if (cmd_val(s, 0) & (1 << 22)) {
 			gma = cmd_gma(s, i + 1);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, i + 2)) << 32;
 			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+			if (ret)
+				break;
 		}
 		i += gmadr_dw_number(s) + 1;
 	}
@@ -958,11 +964,15 @@ static int cmd_handler_srm(struct parser_exec_state *s)
 
 	for (i = 1; i < cmd_len;) {
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+		if (ret)
+			break;
 		if (cmd_val(s, 0) & (1 << 22)) {
 			gma = cmd_gma(s, i + 1);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, i + 2)) << 32;
 			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+			if (ret)
+				break;
 		}
 		i += gmadr_dw_number(s) + 1;
 	}
@@ -1116,7 +1126,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 
 	v = (dword0 & GENMASK(21, 19)) >> 19;
 	if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
-		return -EINVAL;
+		return -EBADRQC;
 
 	info->pipe = gen8_plane_code[v].pipe;
 	info->plane = gen8_plane_code[v].plane;
@@ -1136,7 +1146,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 		info->surf_reg = SPRSURF(info->pipe);
 	} else {
 		WARN_ON(1);
-		return -EINVAL;
+		return -EBADRQC;
 	}
 	return 0;
 }
@@ -1185,7 +1195,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 
 	default:
 		gvt_vgpu_err("unknown plane code %d\n", plane);
-		return -EINVAL;
+		return -EBADRQC;
 	}
 
 	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
@@ -1348,10 +1358,13 @@ static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
 {
 	unsigned long addr;
 	unsigned long gma_high, gma_low;
-	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	struct intel_vgpu *vgpu = s->vgpu;
+	int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
 
-	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
+		gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
 		return INTEL_GVT_INVALID_ADDR;
+	}
 
 	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
 	if (gmadr_bytes == 4) {
@@ -1374,16 +1387,16 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	if (op_size > max_surface_size) {
 		gvt_vgpu_err("command address audit fail name %s\n",
 			s->info->name);
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	if (index_mode)	{
-		if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
-			ret = -EINVAL;
+		if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+			ret = -EFAULT;
 			goto err;
 		}
 	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
-		ret = -EINVAL;
+		ret = -EFAULT;
 		goto err;
 	}
 
@@ -1439,7 +1452,7 @@ static inline int unexpected_cmd(struct parser_exec_state *s)
 
 	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
 
-	return -EINVAL;
+	return -EBADRQC;
 }
 
 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
@@ -1545,10 +1558,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 			return -EFAULT;
 		}
 
-		offset = gma & (GTT_PAGE_SIZE - 1);
+		offset = gma & (I915_GTT_PAGE_SIZE - 1);
 
-		copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
-			GTT_PAGE_SIZE - offset : end_gma - gma;
+		copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
+			I915_GTT_PAGE_SIZE - offset : end_gma - gma;
 
 		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
 
@@ -1576,110 +1589,113 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 	return 1;
 }
 
-static int find_bb_size(struct parser_exec_state *s)
+static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 {
 	unsigned long gma = 0;
 	struct cmd_info *info;
-	int bb_size = 0;
 	uint32_t cmd_len = 0;
-	bool met_bb_end = false;
+	bool bb_end = false;
 	struct intel_vgpu *vgpu = s->vgpu;
 	u32 cmd;
 
+	*bb_size = 0;
+
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
-	cmd = cmd_val(s, 0);
+	if (gma == INTEL_GVT_INVALID_ADDR)
+		return -EFAULT;
 
+	cmd = cmd_val(s, 0);
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
 		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-		return -EINVAL;
+		return -EBADRQC;
 	}
 	do {
-		copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
-				gma, gma + 4, &cmd);
+		if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+				gma, gma + 4, &cmd) < 0)
+			return -EFAULT;
 		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 		if (info == NULL) {
 			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-			return -EINVAL;
+			return -EBADRQC;
 		}
 
 		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
-			met_bb_end = true;
+			bb_end = true;
 		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
-			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
 				/* chained batch buffer */
-				met_bb_end = true;
-			}
+				bb_end = true;
 		}
 		cmd_len = get_cmd_length(info, cmd) << 2;
-		bb_size += cmd_len;
+		*bb_size += cmd_len;
 		gma += cmd_len;
+	} while (!bb_end);
 
-	} while (!met_bb_end);
-
-	return bb_size;
+	return 0;
 }
 
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
-	struct intel_shadow_bb_entry *entry_obj;
 	struct intel_vgpu *vgpu = s->vgpu;
+	struct intel_vgpu_shadow_bb *bb;
 	unsigned long gma = 0;
-	uint32_t bb_size;
-	void *dst = NULL;
+	unsigned long bb_size;
 	int ret = 0;
 
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
+	if (gma == INTEL_GVT_INVALID_ADDR)
+		return -EFAULT;
 
-	/* get the size of the batch buffer */
-	bb_size = find_bb_size(s);
-	if (bb_size < 0)
-		return -EINVAL;
+	ret = find_bb_size(s, &bb_size);
+	if (ret)
+		return ret;
 
-	/* allocate shadow batch buffer */
-	entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
-	if (entry_obj == NULL)
+	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
+	if (!bb)
 		return -ENOMEM;
 
-	entry_obj->obj =
-		i915_gem_object_create(s->vgpu->gvt->dev_priv,
-				       roundup(bb_size, PAGE_SIZE));
-	if (IS_ERR(entry_obj->obj)) {
-		ret = PTR_ERR(entry_obj->obj);
-		goto free_entry;
+	bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
+					 roundup(bb_size, PAGE_SIZE));
+	if (IS_ERR(bb->obj)) {
+		ret = PTR_ERR(bb->obj);
+		goto err_free_bb;
 	}
-	entry_obj->len = bb_size;
-	INIT_LIST_HEAD(&entry_obj->list);
 
-	dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
-	if (IS_ERR(dst)) {
-		ret = PTR_ERR(dst);
-		goto put_obj;
-	}
+	ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
+	if (ret)
+		goto err_free_obj;
 
-	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
-	if (ret) {
-		gvt_vgpu_err("failed to set shadow batch to CPU\n");
-		goto unmap_src;
+	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
+	if (IS_ERR(bb->va)) {
+		ret = PTR_ERR(bb->va);
+		goto err_finish_shmem_access;
 	}
 
-	entry_obj->va = dst;
-	entry_obj->bb_start_cmd_va = s->ip_va;
+	if (bb->clflush & CLFLUSH_BEFORE) {
+		drm_clflush_virt_range(bb->va, bb->obj->base.size);
+		bb->clflush &= ~CLFLUSH_BEFORE;
+	}
 
-	/* copy batch buffer to shadow batch buffer*/
 	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
 			      gma, gma + bb_size,
-			      dst);
+			      bb->va);
 	if (ret < 0) {
 		gvt_vgpu_err("fail to copy guest ring buffer\n");
-		goto unmap_src;
+		ret = -EFAULT;
+		goto err_unmap;
 	}
 
-	list_add(&entry_obj->list, &s->workload->shadow_bb);
+	INIT_LIST_HEAD(&bb->list);
+	list_add(&bb->list, &s->workload->shadow_bb);
+
+	bb->accessing = true;
+	bb->bb_start_cmd_va = s->ip_va;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.
@@ -1688,17 +1704,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	 * buffer's gma in pair. After all, we don't want to pin the shadow
 	 * buffer here (too early).
 	 */
-	s->ip_va = dst;
+	s->ip_va = bb->va;
 	s->ip_gma = gma;
-
 	return 0;
-
-unmap_src:
-	i915_gem_object_unpin_map(entry_obj->obj);
-put_obj:
-	i915_gem_object_put(entry_obj->obj);
-free_entry:
-	kfree(entry_obj);
+err_unmap:
+	i915_gem_object_unpin_map(bb->obj);
+err_finish_shmem_access:
+	i915_gem_obj_finish_shmem_access(bb->obj);
+err_free_obj:
+	i915_gem_object_put(bb->obj);
+err_free_bb:
+	kfree(bb);
 	return ret;
 }
 
@@ -1710,13 +1726,13 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 
 	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
 		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
 	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
 		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	s->saved_buf_addr_type = s->buf_addr_type;
@@ -1740,7 +1756,6 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 		if (ret < 0)
 			return ret;
 	}
-
 	return ret;
 }
 
@@ -2430,7 +2445,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (info == NULL) {
 		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-		return -EINVAL;
+		return -EBADRQC;
 	}
 
 	s->info = info;
@@ -2465,6 +2480,10 @@ static inline bool gma_out_of_range(unsigned long gma,
 		return (gma > gma_tail) && (gma < gma_head);
 }
 
+/* Keep a consistent return type: -EBADRQC for an unknown command,
+ * -EFAULT for an invalid address, -EPERM for a non-privileged
+ * register write. These codes later feed the VM health check.
+ */
 static int command_scan(struct parser_exec_state *s,
 		unsigned long rb_head, unsigned long rb_tail,
 		unsigned long rb_start, unsigned long rb_len)
@@ -2487,7 +2506,7 @@ static int command_scan(struct parser_exec_state *s,
 					s->ip_gma, rb_start,
 					gma_bottom);
 				parser_exec_state_dump(s);
-				return -EINVAL;
+				return -EFAULT;
 			}
 			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
 				gvt_vgpu_err("ip_gma %lx out of range."
@@ -2516,7 +2535,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	int ret = 0;
 
 	/* ring base is page aligned */
-	if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+	if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
 		return -EINVAL;
 
 	gma_head = workload->rb_start + workload->rb_head;
@@ -2565,7 +2584,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 				wa_ctx);
 
 	/* ring base is page aligned */
-	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
+					I915_GTT_PAGE_SIZE)))
 		return -EINVAL;
 
 	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
@@ -2604,6 +2624,7 @@ out:
 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
 	void *shadow_ring_buffer_va;
 	int ring_id = workload->ring_id;
@@ -2619,19 +2640,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_tail = workload->rb_start + workload->rb_tail;
 	gma_top = workload->rb_start + guest_rb_size;
 
-	if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
-		void *va = vgpu->reserve_ring_buffer_va[ring_id];
+	if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+		void *p;
+
 		/* realloc the new ring buffer if needed */
-		vgpu->reserve_ring_buffer_va[ring_id] =
-			krealloc(va, workload->rb_len, GFP_KERNEL);
-		if (!vgpu->reserve_ring_buffer_va[ring_id]) {
-			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
+				GFP_KERNEL);
+		if (!p) {
+			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
 			return -ENOMEM;
 		}
-		vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
+		s->ring_scan_buffer[ring_id] = p;
+		s->ring_scan_buffer_size[ring_id] = workload->rb_len;
 	}
 
-	shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
+	shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
 
 	/* get shadow ring buffer va */
 	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
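
Editorial note: two themes run through this file. First, the blanket -EINVAL returns are split into distinct codes (see the comment block before command_scan() above) so the caller can tell guest misbehavior from host-side failures. Second, find_bb_size() now reports the size through an out-parameter; the old version stored the int return in a uint32_t, so its `< 0` error check could never fire. A hedged sketch of how a caller might grade the new codes for the VM health check; enter_failsafe_mode() and GVT_FAILSAFE_GUEST_ERR are assumed from elsewhere in this series, not defined in this hunk:

    #include <linux/errno.h>

    static void grade_scan_error(struct intel_vgpu *vgpu, int ret)
    {
            switch (ret) {
            case -EBADRQC:  /* unknown or ctx-inhibited command */
            case -EFAULT:   /* address outside the valid GM range */
            case -EPERM:    /* non-whitelisted force-nonpriv write */
                    /* guest misbehavior: fall back to failsafe mode */
                    enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                    break;
            default:        /* host-side errors (e.g. -ENOMEM): no blame */
                    break;
            }
    }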

+ 12 - 12
drivers/gpu/drm/i915/gvt/debug.h

@@ -25,41 +25,41 @@
 #define __GVT_DEBUG_H__
 
 #define gvt_err(fmt, args...) \
-	DRM_ERROR("gvt: "fmt, ##args)
+	pr_err("gvt: "fmt, ##args)
 
 #define gvt_vgpu_err(fmt, args...)					\
 do {									\
 	if (IS_ERR_OR_NULL(vgpu))					\
-		DRM_DEBUG_DRIVER("gvt: "fmt, ##args);			\
+		pr_err("gvt: "fmt, ##args);			\
 	else								\
-		DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+		pr_err("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
 } while (0)
 
 #define gvt_dbg_core(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
+	pr_debug("gvt: core: "fmt, ##args)
 
 #define gvt_dbg_irq(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
+	pr_debug("gvt: irq: "fmt, ##args)
 
 #define gvt_dbg_mm(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
+	pr_debug("gvt: mm: "fmt, ##args)
 
 #define gvt_dbg_mmio(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
+	pr_debug("gvt: mmio: "fmt, ##args)
 
 #define gvt_dbg_dpy(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
+	pr_debug("gvt: dpy: "fmt, ##args)
 
 #define gvt_dbg_el(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
+	pr_debug("gvt: el: "fmt, ##args)
 
 #define gvt_dbg_sched(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
+	pr_debug("gvt: sched: "fmt, ##args)
 
 #define gvt_dbg_render(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
+	pr_debug("gvt: render: "fmt, ##args)
 
 #define gvt_dbg_cmd(fmt, args...) \
-	DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
+	pr_debug("gvt: cmd: "fmt, ##args)
 
 #endif
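
Editorial note: moving gvt_err() to pr_err() keeps errors unconditional, while moving the gvt_dbg_* macros from DRM_DEBUG_DRIVER() to pr_debug() detaches them from drm.debug and, with CONFIG_DYNAMIC_DEBUG, makes every call site individually switchable through dynamic debug (e.g. writing "file drivers/gpu/drm/i915/gvt/* +p" to <debugfs>/dynamic_debug/control). A minimal sketch of the resulting behavior, using a hypothetical category macro in the same style:

    #include <linux/printk.h>

    #define demo_dbg(fmt, args...) \
            pr_debug("gvt: demo: " fmt, ##args)

    static void demo(void)
    {
            /* silent by default; emitted only when this call site is
             * enabled via dyndbg (or DEBUG is defined at build time) */
            demo_dbg("workload %d queued\n", 3);
    }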

+ 212 - 0
drivers/gpu/drm/i915/gvt/debugfs.c

@@ -0,0 +1,212 @@
+/*
+ * Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/debugfs.h>
+#include <linux/list_sort.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+struct mmio_diff_param {
+	struct intel_vgpu *vgpu;
+	int total;
+	int diff;
+	struct list_head diff_mmio_list;
+};
+
+struct diff_mmio {
+	struct list_head node;
+	u32 offset;
+	u32 preg;
+	u32 vreg;
+};
+
+/* Compare two diff_mmio items. */
+static int mmio_offset_compare(void *priv,
+	struct list_head *a, struct list_head *b)
+{
+	struct diff_mmio *ma;
+	struct diff_mmio *mb;
+
+	ma = container_of(a, struct diff_mmio, node);
+	mb = container_of(b, struct diff_mmio, node);
+	if (ma->offset < mb->offset)
+		return -1;
+	else if (ma->offset > mb->offset)
+		return 1;
+	return 0;
+}
+
+static inline int mmio_diff_handler(struct intel_gvt *gvt,
+				    u32 offset, void *data)
+{
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct mmio_diff_param *param = data;
+	struct diff_mmio *node;
+	u32 preg, vreg;
+
+	preg = I915_READ_NOTRACE(_MMIO(offset));
+	vreg = vgpu_vreg(param->vgpu, offset);
+
+	if (preg != vreg) {
+		node = kmalloc(sizeof(*node), GFP_KERNEL);
+		if (!node)
+			return -ENOMEM;
+
+		node->offset = offset;
+		node->preg = preg;
+		node->vreg = vreg;
+		list_add(&node->node, &param->diff_mmio_list);
+		param->diff++;
+	}
+	param->total++;
+	return 0;
+}
+
+/* Show all the mismatched values of the tracked MMIO registers. */
+static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
+{
+	struct intel_vgpu *vgpu = s->private;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct mmio_diff_param param = {
+		.vgpu = vgpu,
+		.total = 0,
+		.diff = 0,
+	};
+	struct diff_mmio *node, *next;
+
+	INIT_LIST_HEAD(&param.diff_mmio_list);
+
+	mutex_lock(&gvt->lock);
+	spin_lock_bh(&gvt->scheduler.mmio_context_lock);
+
+	mmio_hw_access_pre(gvt->dev_priv);
+	/* Collect all the differing MMIOs into the list. */
+	intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
+	mmio_hw_access_post(gvt->dev_priv);
+
+	spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
+	mutex_unlock(&gvt->lock);
+
+	/* Sort in ascending order by MMIO offset. */
+	list_sort(NULL, &param.diff_mmio_list, mmio_offset_compare);
+
+	seq_printf(s, "%-8s %-8s %-8s %-8s\n", "Offset", "HW", "vGPU", "Diff");
+	list_for_each_entry_safe(node, next, &param.diff_mmio_list, node) {
+		u32 diff = node->preg ^ node->vreg;
+
+		seq_printf(s, "%08x %08x %08x %*pbl\n",
+			   node->offset, node->preg, node->vreg,
+			   32, &diff);
+		list_del(&node->node);
+		kfree(node);
+	}
+	seq_printf(s, "Total: %d, Diff: %d\n", param.total, param.diff);
+	return 0;
+}
+
+static int vgpu_mmio_diff_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, vgpu_mmio_diff_show, inode->i_private);
+}
+
+static const struct file_operations vgpu_mmio_diff_fops = {
+	.open		= vgpu_mmio_diff_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/**
+ * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
+{
+	struct dentry *ent;
+	char name[10] = "";
+
+	sprintf(name, "vgpu%d", vgpu->id);
+	vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
+	if (!vgpu->debugfs)
+		return -ENOMEM;
+
+	ent = debugfs_create_bool("active", 0444, vgpu->debugfs,
+				  &vgpu->active);
+	if (!ent)
+		return -ENOMEM;
+
+	ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs,
+				  vgpu, &vgpu_mmio_diff_fops);
+	if (!ent)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * intel_gvt_debugfs_remove_vgpu - remove debugfs entries of a vGPU
+ * @vgpu: a vGPU
+ */
+void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
+{
+	debugfs_remove_recursive(vgpu->debugfs);
+	vgpu->debugfs = NULL;
+}
+
+/**
+ * intel_gvt_debugfs_init - register gvt debugfs root entry
+ * @gvt: GVT device
+ *
+ * Returns:
+ * zero on success, negative if failed.
+ */
+int intel_gvt_debugfs_init(struct intel_gvt *gvt)
+{
+	struct drm_minor *minor = gvt->dev_priv->drm.primary;
+	struct dentry *ent;
+
+	gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
+	if (!gvt->debugfs_root) {
+		gvt_err("Cannot create debugfs dir\n");
+		return -ENOMEM;
+	}
+
+	ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
+				   &gvt->mmio.num_tracked_mmio);
+	if (!ent)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * intel_gvt_debugfs_clean - remove debugfs entries
+ * @gvt: GVT device
+ */
+void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
+{
+	debugfs_remove_recursive(gvt->debugfs_root);
+	gvt->debugfs_root = NULL;
+}
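
Editorial note: the new entries appear under the DRM primary minor's debugfs directory, typically /sys/kernel/debug/dri/0/gvt/, with one vgpu<N> subdirectory per vGPU (the minor number and vGPU id below are assumptions for illustration). A small userspace reader as a usage sketch:

    #include <stdio.h>

    int main(void)
    {
            char line[128];
            FILE *f = fopen("/sys/kernel/debug/dri/0/gvt/vgpu1/mmio_diff", "r");

            if (!f)
                    return 1;
            /* prints the Offset / HW / vGPU / Diff rows, then the summary */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }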

+ 68 - 422
drivers/gpu/drm/i915/gvt/execlist.c

@@ -46,8 +46,6 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
-
 static int context_switch_events[] = {
 	[RCS] = RCS_AS_CONTEXT_SWITCH,
 	[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -135,6 +133,8 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 write_pointer;
 	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
+	unsigned long hwsp_gpa;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
 			_EL_OFFSET_STATUS_PTR);
@@ -160,6 +160,20 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
 	ctx_status_ptr.write_ptr = write_pointer;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 
+	/* Update the CSB and CSB write pointer in HWSP */
+	hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+					 vgpu->hws_pga[ring_id]);
+	if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
+		intel_gvt_hypervisor_write_gpa(vgpu,
+			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
+			write_pointer * 8,
+			status, 8);
+		intel_gvt_hypervisor_write_gpa(vgpu,
+			hwsp_gpa +
+			intel_hws_csb_write_index(dev_priv) * 4,
+			&write_pointer, 4);
+	}
+
 	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
 		vgpu->id, write_pointer, offset, status->ldw, status->udw);
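
Editorial note: guests running a recent i915 read context-status-buffer (CSB) entries from the hardware status page rather than from MMIO, so the emulation must mirror each 8-byte status entry and the 4-byte write pointer into the guest HWSP at hws_pga. A standalone sketch of the offset math behind the two hypervisor writes above (the 0x10 buffer index follows the i915 definition current at this merge):

    #define I915_HWS_CSB_BUF0_INDEX 0x10    /* first CSB slot, in dwords */

    static unsigned long csb_entry_gpa(unsigned long hwsp_gpa, u32 write_pointer)
    {
            /* each CSB entry is two dwords (8 bytes) */
            return hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8;
    }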
 
@@ -358,212 +372,47 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
 	return 0;
 }
 
-static void free_workload(struct intel_vgpu_workload *workload)
-{
-	intel_vgpu_unpin_mm(workload->shadow_mm);
-	intel_gvt_mm_unreference(workload->shadow_mm);
-	kmem_cache_free(workload->vgpu->workloads, workload);
-}
-
 #define get_desc_from_elsp_dwords(ed, i) \
 	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
-{
-	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
-	struct intel_shadow_bb_entry *entry_obj;
-
-	/* pin the gem object to ggtt */
-	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
-		struct i915_vma *vma;
-
-		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
-		if (IS_ERR(vma)) {
-			return PTR_ERR(vma);
-		}
-
-		/* FIXME: we are not tracking our pinned VMA leaving it
-		 * up to the core to fix up the stray pin_count upon
-		 * free.
-		 */
-
-		/* update the relocate gma with shadow batch buffer*/
-		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
-		if (gmadr_bytes == 8)
-			entry_obj->bb_start_cmd_va[2] = 0;
-	}
-	return 0;
-}
-
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
-{
-	struct intel_vgpu_workload *workload = container_of(wa_ctx,
-					struct intel_vgpu_workload,
-					wa_ctx);
-	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
-	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
-	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
-
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap_atomic(page);
-
-	shadow_ring_context->bb_per_ctx_ptr.val =
-		(shadow_ring_context->bb_per_ctx_ptr.val &
-		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
-	shadow_ring_context->rcs_indirect_ctx.val =
-		(shadow_ring_context->rcs_indirect_ctx.val &
-		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
-	kunmap_atomic(shadow_ring_context);
-	return 0;
-}
-
-static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
-{
-	struct i915_vma *vma;
-	unsigned char *per_ctx_va =
-		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
-		wa_ctx->indirect_ctx.size;
-
-	if (wa_ctx->indirect_ctx.size == 0)
-		return 0;
-
-	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
-				       0, CACHELINE_BYTES, 0);
-	if (IS_ERR(vma)) {
-		return PTR_ERR(vma);
-	}
-
-	/* FIXME: we are not tracking our pinned VMA leaving it
-	 * up to the core to fix up the stray pin_count upon
-	 * free.
-	 */
-
-	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
-
-	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
-	memset(per_ctx_va, 0, CACHELINE_BYTES);
-
-	update_wa_ctx_2_shadow_ctx(wa_ctx);
-	return 0;
-}
-
-static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
-{
-	/* release all the shadow batch buffer */
-	if (!list_empty(&workload->shadow_bb)) {
-		struct intel_shadow_bb_entry *entry_obj =
-			list_first_entry(&workload->shadow_bb,
-					 struct intel_shadow_bb_entry,
-					 list);
-		struct intel_shadow_bb_entry *temp;
-
-		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-					 list) {
-			i915_gem_object_unpin_map(entry_obj->obj);
-			i915_gem_object_put(entry_obj->obj);
-			list_del(&entry_obj->list);
-			kfree(entry_obj);
-		}
-	}
-}
-
 static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct execlist_ctx_descriptor_format ctx[2];
 	int ring_id = workload->ring_id;
 	int ret;
 
-	ret = intel_vgpu_pin_mm(workload->shadow_mm);
-	if (ret) {
-		gvt_vgpu_err("fail to vgpu pin mm\n");
-		goto out;
-	}
-
-	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
-	if (ret) {
-		gvt_vgpu_err("fail to vgpu sync oos pages\n");
-		goto err_unpin_mm;
-	}
-
-	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
-	if (ret) {
-		gvt_vgpu_err("fail to flush post shadow\n");
-		goto err_unpin_mm;
-	}
-
-	ret = prepare_shadow_batch_buffer(workload);
-	if (ret) {
-		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
-		goto err_unpin_mm;
-	}
-
-	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
-	if (ret) {
-		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
-		goto err_shadow_batch;
-	}
-
 	if (!workload->emulate_schedule_in)
 		return 0;
 
-	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
-	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
 
-	ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
-	if (!ret)
-		goto out;
-	else
+	ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+	if (ret) {
 		gvt_vgpu_err("fail to emulate execlist schedule in\n");
-
-	release_shadow_wa_ctx(&workload->wa_ctx);
-err_shadow_batch:
-	release_shadow_batch_buffer(workload);
-err_unpin_mm:
-	intel_vgpu_unpin_mm(workload->shadow_mm);
-out:
-	return ret;
+		return ret;
+	}
+	return 0;
 }
 
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	int ring_id = workload->ring_id;
-	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
 	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
-	int ret;
+	int ret = 0;
 
 	gvt_dbg_el("complete workload %p status %d\n", workload,
 			workload->status);
 
-	if (!workload->status) {
-		release_shadow_batch_buffer(workload);
-		release_shadow_wa_ctx(&workload->wa_ctx);
-	}
-
-	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
-		/* if workload->status is not successful means HW GPU
-		 * has occurred GPU hang or something wrong with i915/GVT,
-		 * and GVT won't inject context switch interrupt to guest.
-		 * So this error is a vGPU hang actually to the guest.
-		 * According to this we should emunlate a vGPU hang. If
-		 * there are pending workloads which are already submitted
-		 * from guest, we should clean them up like HW GPU does.
-		 *
-		 * if it is in middle of engine resetting, the pending
-		 * workloads won't be submitted to HW GPU and will be
-		 * cleaned up during the resetting process later, so doing
-		 * the workload clean up here doesn't have any impact.
-		 **/
-		clean_workloads(vgpu, ENGINE_MASK(ring_id));
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
 		goto out;
-	}
 
 	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
@@ -578,213 +427,60 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 
 	if (lite_restore) {
 		gvt_dbg_el("next context == current - no schedule-out\n");
-		free_workload(workload);
-		return 0;
+		goto out;
 	}
 
 	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
-	if (ret)
-		goto err;
 out:
-	free_workload(workload);
-	return 0;
-err:
-	free_workload(workload);
+	intel_vgpu_unpin_mm(workload->shadow_mm);
+	intel_vgpu_destroy_workload(workload);
 	return ret;
 }
 
-#define RING_CTX_OFF(x) \
-	offsetof(struct execlist_ring_context, x)
-
-static void read_guest_pdps(struct intel_vgpu *vgpu,
-		u64 ring_context_gpa, u32 pdp[8])
-{
-	u64 gpa;
-	int i;
-
-	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
-
-	for (i = 0; i < 8; i++)
-		intel_gvt_hypervisor_read_gpa(vgpu,
-				gpa + i * 8, &pdp[7 - i], 4);
-}
-
-static int prepare_mm(struct intel_vgpu_workload *workload)
-{
-	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
-	struct intel_vgpu_mm *mm;
-	struct intel_vgpu *vgpu = workload->vgpu;
-	int page_table_level;
-	u32 pdp[8];
-
-	if (desc->addressing_mode == 1) { /* legacy 32-bit */
-		page_table_level = 3;
-	} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
-		page_table_level = 4;
-	} else {
-		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
-		return -EINVAL;
-	}
-
-	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
-
-	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
-	if (mm) {
-		intel_gvt_mm_reference(mm);
-	} else {
-
-		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
-				pdp, page_table_level, 0);
-		if (IS_ERR(mm)) {
-			gvt_vgpu_err("fail to create mm object.\n");
-			return PTR_ERR(mm);
-		}
-	}
-	workload->shadow_mm = mm;
-	return 0;
-}
-
-#define get_last_workload(q) \
-	(list_empty(q) ? NULL : container_of(q->prev, \
-	struct intel_vgpu_workload, list))
-
 static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 		struct execlist_ctx_descriptor_format *desc,
 		bool emulate_schedule_in)
 {
-	struct list_head *q = workload_q_head(vgpu, ring_id);
-	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_vgpu_workload *workload = NULL;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u64 ring_context_gpa;
-	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
-	int ret;
-
-	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
-	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
-		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
-		return -EINVAL;
-	}
-
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(ring_header.val), &head, 4);
-
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(ring_tail.val), &tail, 4);
 
-	head &= RB_HEAD_OFF_MASK;
-	tail &= RB_TAIL_OFF_MASK;
+	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+	if (IS_ERR(workload))
+		return PTR_ERR(workload);
 
-	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
-		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
-		gvt_dbg_el("ctx head %x real head %lx\n", head,
-				last_workload->rb_tail);
-		/*
-		 * cannot use guest context head pointer here,
-		 * as it might not be updated at this time
-		 */
-		head = last_workload->rb_tail;
-	}
-
-	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
-
-	workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
-	if (!workload)
-		return -ENOMEM;
-
-	/* record some ring buffer register values for scan and shadow */
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(rb_start.val), &start, 4);
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
-
-	INIT_LIST_HEAD(&workload->list);
-	INIT_LIST_HEAD(&workload->shadow_bb);
-
-	init_waitqueue_head(&workload->shadow_ctx_status_wq);
-	atomic_set(&workload->shadow_ctx_active, 0);
-
-	workload->vgpu = vgpu;
-	workload->ring_id = ring_id;
-	workload->ctx_desc = *desc;
-	workload->ring_context_gpa = ring_context_gpa;
-	workload->rb_head = head;
-	workload->rb_tail = tail;
-	workload->rb_start = start;
-	workload->rb_ctl = ctl;
 	workload->prepare = prepare_execlist_workload;
 	workload->complete = complete_execlist_workload;
-	workload->status = -EINPROGRESS;
 	workload->emulate_schedule_in = emulate_schedule_in;
-	workload->shadowed = false;
-
-	if (ring_id == RCS) {
-		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
-		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
-
-		workload->wa_ctx.indirect_ctx.guest_gma =
-			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
-		workload->wa_ctx.indirect_ctx.size =
-			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
-			CACHELINE_BYTES;
-		workload->wa_ctx.per_ctx.guest_gma =
-			per_ctx & PER_CTX_ADDR_MASK;
-		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
-	}
 
 	if (emulate_schedule_in)
-		workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
-
-	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
-			workload, ring_id, head, tail, start, ctl);
+		workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
 
 	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
 			emulate_schedule_in);
 
-	ret = prepare_mm(workload);
-	if (ret) {
-		kmem_cache_free(vgpu->workloads, workload);
-		return ret;
-	}
-
-	/* Only scan and shadow the first workload in the queue
-	 * as there is only one pre-allocated buf-obj for shadow.
-	 */
-	if (list_empty(workload_q_head(vgpu, ring_id))) {
-		intel_runtime_pm_get(dev_priv);
-		mutex_lock(&dev_priv->drm.struct_mutex);
-		intel_gvt_scan_and_shadow_workload(workload);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-		intel_runtime_pm_put(dev_priv);
-	}
-
 	queue_workload(workload);
 	return 0;
 }
 
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 {
-	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
-	struct execlist_ctx_descriptor_format desc[2];
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct execlist_ctx_descriptor_format *desc[2];
 	int i, ret;
 
-	desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
-	desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+	desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+	desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
 
-	if (!desc[0].valid) {
+	if (!desc[0]->valid) {
 		gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
 		goto inv_desc;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(desc); i++) {
-		if (!desc[i].valid)
+		if (!desc[i]->valid)
 			continue;
-		if (!desc[i].privilege_access) {
+		if (!desc[i]->privilege_access) {
 			gvt_vgpu_err("unexpected GGTT elsp submission\n");
 			goto inv_desc;
 		}
@@ -792,9 +488,9 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 
 	/* submit workload */
 	for (i = 0; i < ARRAY_SIZE(desc); i++) {
-		if (!desc[i].valid)
+		if (!desc[i]->valid)
 			continue;
-		ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
+		ret = submit_context(vgpu, ring_id, desc[i], i == 0);
 		if (ret) {
 			gvt_vgpu_err("failed to submit desc %d\n", i);
 			return ret;
@@ -805,13 +501,14 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 
 inv_desc:
 	gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
-		     desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
+		     desc[0]->udw, desc[0]->ldw, desc[1]->udw, desc[1]->ldw);
 	return -EINVAL;
 }
 
 static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 {
-	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 ctx_status_ptr_reg;
 
@@ -831,91 +528,40 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
-{
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct intel_engine_cs *engine;
-	struct intel_vgpu_workload *pos, *n;
-	unsigned int tmp;
-
-	/* free the unsubmited workloads in the queues. */
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-		list_for_each_entry_safe(pos, n,
-			&vgpu->workload_q_head[engine->id], list) {
-			list_del_init(&pos->list);
-			free_workload(pos);
-		}
-
-		clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
-	}
-}
-
-void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
-{
-	enum intel_engine_id i;
-	struct intel_engine_cs *engine;
-
-	clean_workloads(vgpu, ALL_ENGINES);
-	kmem_cache_destroy(vgpu->workloads);
-
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		kfree(vgpu->reserve_ring_buffer_va[i]);
-		vgpu->reserve_ring_buffer_va[i] = NULL;
-		vgpu->reserve_ring_buffer_size[i] = 0;
-	}
-
-}
-
-#define RESERVE_RING_BUFFER_SIZE		((1 * PAGE_SIZE)/8)
-int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
+void clean_execlist(struct intel_vgpu *vgpu)
 {
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
 
-	/* each ring has a virtual execlist engine */
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		init_vgpu_execlist(vgpu, i);
-		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
-	}
-
-	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
-			sizeof(struct intel_vgpu_workload), 0,
-			SLAB_HWCACHE_ALIGN,
-			NULL);
+		struct intel_vgpu_submission *s = &vgpu->submission;
 
-	if (!vgpu->workloads)
-		return -ENOMEM;
-
-	/* each ring has a shadow ring buffer until vgpu destroyed */
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		vgpu->reserve_ring_buffer_va[i] =
-			kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
-		if (!vgpu->reserve_ring_buffer_va[i]) {
-			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
-			goto out;
-		}
-		vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
+		kfree(s->ring_scan_buffer[i]);
+		s->ring_scan_buffer[i] = NULL;
+		s->ring_scan_buffer_size[i] = 0;
 	}
-	return 0;
-out:
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		if (vgpu->reserve_ring_buffer_size[i]) {
-			kfree(vgpu->reserve_ring_buffer_va[i]);
-			vgpu->reserve_ring_buffer_va[i] = NULL;
-			vgpu->reserve_ring_buffer_size[i] = 0;
-		}
-	}
-	return -ENOMEM;
 }
 
-void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+void reset_execlist(struct intel_vgpu *vgpu,
 		unsigned long engine_mask)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
 	unsigned int tmp;
 
-	clean_workloads(vgpu, engine_mask);
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine->id);
 }
+
+int init_execlist(struct intel_vgpu *vgpu)
+{
+	reset_execlist(vgpu, ALL_ENGINES);
+	return 0;
+}
+
+const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = {
+	.name = "execlist",
+	.init = init_execlist,
+	.reset = reset_execlist,
+	.clean = clean_execlist,
+};
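
Editorial note: the per-ring execlist state now lives in intel_vgpu_submission and is reached through the ops table above, so the virtual submission backend can be swapped without touching callers (the "workload submission code refactor for future enabling" from the tag message). A sketch of how a vGPU might bind a backend through the table; the helper name here is an assumption for illustration:

    static int bind_submission_ops(struct intel_vgpu *vgpu,
                                   const struct intel_vgpu_submission_ops *ops)
    {
            struct intel_vgpu_submission *s = &vgpu->submission;
            int ret;

            if (s->ops)
                    s->ops->clean(vgpu);    /* tear down the old backend */

            ret = ops->init(vgpu);
            if (ret)
                    return ret;

            s->ops = ops;
            return 0;
    }

For example, bind_submission_ops(vgpu, &intel_vgpu_execlist_submission_ops) would select the virtual execlist backend defined above.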

+ 4 - 4
drivers/gpu/drm/i915/gvt/execlist.h

@@ -36,10 +36,6 @@
 #define _GVT_EXECLIST_H_
 
 struct execlist_ctx_descriptor_format {
-	union {
-		u32 udw;
-		u32 context_id;
-	};
 	union {
 		u32 ldw;
 		struct {
@@ -54,6 +50,10 @@ struct execlist_ctx_descriptor_format {
 			u32 lrca                   : 20;
 		};
 	};
+	union {
+		u32 udw;
+		u32 context_id;
+	};
 };
 
 struct execlist_status_format {
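
Editorial note: swapping the two unions puts ldw before udw so that, on a little-endian CPU, the struct overlays the raw 64-bit descriptor low dword first. That matters because intel_vgpu_submit_execlist() (in the execlist.c hunk above) now takes pointers straight into elsp_dwords instead of copying the descriptors out. A runnable standalone illustration of the overlay:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    struct desc { uint32_t ldw; uint32_t udw; };

    int main(void)
    {
            uint32_t dwords[2] = { 0x00000311, 0xdeadbeef }; /* ldw, udw */
            struct desc d;

            memcpy(&d, dwords, sizeof(d));  /* low dword maps to ldw */
            assert(d.ldw == 0x00000311 && d.udw == 0xdeadbeef);
            return 0;
    }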

+ 11 - 15
drivers/gpu/drm/i915/gvt/firmware.c

@@ -66,20 +66,23 @@ static struct bin_attribute firmware_attr = {
 	.mmap = NULL,
 };
 
-static int expose_firmware_sysfs(struct intel_gvt *gvt)
+static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
 {
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
+
+	*(u32 *)(data + offset) = I915_READ_NOTRACE(_MMIO(offset));
+	return 0;
+}
+
+static int expose_firmware_sysfs(struct intel_gvt *gvt)
+{
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
-	struct intel_gvt_mmio_info *e;
-	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
-	int num = gvt->mmio.num_mmio_block;
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
 	unsigned long size, crc32_start;
-	int i, j;
-	int ret;
+	int i, ret;
 
 	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
 	firmware = vzalloc(size);
@@ -104,15 +107,8 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
 	p = firmware + h->mmio_offset;
 
-	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
-		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
-
-	for (i = 0; i < num; i++, block++) {
-		for (j = 0; j < block->size; j += 4)
-			*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
-				I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
-							block->offset) + j));
-	}
+	/* Take a snapshot of the HW MMIO registers. */
+	intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
 
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 

+ 152 - 109
drivers/gpu/drm/i915/gvt/gtt.c

@@ -94,12 +94,12 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
 	u64 h_addr;
 	int ret;
 
-	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
 				       &h_addr);
 	if (ret)
 		return ret;
 
-	*h_index = h_addr >> GTT_PAGE_SHIFT;
+	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
 	return 0;
 }
 
@@ -109,12 +109,12 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
 	u64 g_addr;
 	int ret;
 
-	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
 				       &g_addr);
 	if (ret)
 		return ret;
 
-	*g_index = g_addr >> GTT_PAGE_SHIFT;
+	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
 	return 0;
 }
 
@@ -156,13 +156,15 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
 
 struct gtt_type_table_entry {
 	int entry_type;
+	int pt_type;
 	int next_pt_type;
 	int pse_entry_type;
 };
 
-#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
+#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
 	[type] = { \
 		.entry_type = e_type, \
+		.pt_type = cpt_type, \
 		.next_pt_type = npt_type, \
 		.pse_entry_type = pse_type, \
 	}
@@ -170,55 +172,68 @@ struct gtt_type_table_entry {
 static struct gtt_type_table_entry gtt_type_table[] = {
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+			GTT_TYPE_INVALID,
 			GTT_TYPE_PPGTT_PML4_PT,
 			GTT_TYPE_INVALID),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
 			GTT_TYPE_PPGTT_PML4_ENTRY,
+			GTT_TYPE_PPGTT_PML4_PT,
 			GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_INVALID),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
 			GTT_TYPE_PPGTT_PML4_ENTRY,
+			GTT_TYPE_PPGTT_PML4_PT,
 			GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_INVALID),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_PPGTT_PDP_ENTRY,
+			GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
 			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+			GTT_TYPE_INVALID,
 			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
 			GTT_TYPE_PPGTT_PDP_ENTRY,
+			GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PDE_ENTRY,
+			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
 			GTT_TYPE_PPGTT_PDE_ENTRY,
+			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_INVALID,
 			GTT_TYPE_INVALID),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_INVALID,
 			GTT_TYPE_INVALID),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
 			GTT_TYPE_PPGTT_PDE_ENTRY,
+			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_INVALID,
 			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
 			GTT_TYPE_PPGTT_PDP_ENTRY,
+			GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_INVALID,
 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
 			GTT_TYPE_GGTT_PTE,
 			GTT_TYPE_INVALID,
+			GTT_TYPE_INVALID,
 			GTT_TYPE_INVALID),
 };
 
@@ -227,6 +242,11 @@ static inline int get_next_pt_type(int type)
 	return gtt_type_table[type].next_pt_type;
 }
 
+static inline int get_pt_type(int type)
+{
+	return gtt_type_table[type].pt_type;
+}
+
 static inline int get_entry_type(int type)
 {
 	return gtt_type_table[type].entry_type;
@@ -351,7 +371,7 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
 		return false;
 
 	e->type = get_entry_type(e->type);
-	if (!(e->val64 & (1 << 7)))
+	if (!(e->val64 & BIT(7)))
 		return false;
 
 	e->type = get_pse_type(e->type);
@@ -369,12 +389,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
 			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
 		return (e->val64 != 0);
 	else
-		return (e->val64 & (1 << 0));
+		return (e->val64 & BIT(0));
 }
 
 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
 {
-	e->val64 &= ~(1 << 0);
+	e->val64 &= ~BIT(0);
+}
+
+static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
+{
+	e->val64 |= BIT(0);
 }
 
 /*
@@ -382,7 +407,7 @@ static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
  */
 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
 {
-	unsigned long x = (gma >> GTT_PAGE_SHIFT);
+	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
 
 	trace_gma_index(__func__, gma, x);
 	return x;
@@ -406,6 +431,7 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
 	.get_entry = gtt_get_entry64,
 	.set_entry = gtt_set_entry64,
 	.clear_present = gtt_entry_clear_present,
+	.set_present = gtt_entry_set_present,
 	.test_present = gen8_gtt_test_present,
 	.test_pse = gen8_gtt_test_pse,
 	.get_pfn = gen8_gtt_get_pfn,
@@ -494,7 +520,7 @@ static inline int ppgtt_spt_get_entry(
 		return -EINVAL;
 
 	ret = ops->get_entry(page_table, e, index, guest,
-			spt->guest_page.gfn << GTT_PAGE_SHIFT,
+			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
 			spt->vgpu);
 	if (ret)
 		return ret;
@@ -516,7 +542,7 @@ static inline int ppgtt_spt_set_entry(
 		return -EINVAL;
 
 	return ops->set_entry(page_table, e, index, guest,
-			spt->guest_page.gfn << GTT_PAGE_SHIFT,
+			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
 			spt->vgpu);
 }
 
@@ -537,88 +563,103 @@ static inline int ppgtt_spt_set_entry(
 		spt->shadow_page.type, e, index, false)
 
 /**
- * intel_vgpu_init_guest_page - init a guest page data structure
+ * intel_vgpu_init_page_track - init a page track data structure
  * @vgpu: a vGPU
- * @p: a guest page data structure
+ * @t: a page track data structure
  * @gfn: guest memory page frame number
- * @handler: function will be called when target guest memory page has
+ * @handler: the function will be called when target guest memory page has
  * been modified.
  *
- * This function is called when user wants to track a guest memory page.
+ * This function is called when a user wants to prepare a page track data
+ * structure to track a guest memory page.
  *
  * Returns:
  * Zero on success, negative error code if failed.
  */
-int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *p,
+int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
+		struct intel_vgpu_page_track *t,
 		unsigned long gfn,
 		int (*handler)(void *, u64, void *, int),
 		void *data)
 {
-	INIT_HLIST_NODE(&p->node);
+	INIT_HLIST_NODE(&t->node);
 
-	p->writeprotection = false;
-	p->gfn = gfn;
-	p->handler = handler;
-	p->data = data;
-	p->oos_page = NULL;
-	p->write_cnt = 0;
+	t->tracked = false;
+	t->gfn = gfn;
+	t->handler = handler;
+	t->data = data;
 
-	hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
+	hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
 	return 0;
 }
 
-static int detach_oos_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_oos_page *oos_page);
-
 /**
- * intel_vgpu_clean_guest_page - release the resource owned by guest page data
- * structure
+ * intel_vgpu_clean_page_track - release a page track data structure
  * @vgpu: a vGPU
- * @p: a tracked guest page
+ * @t: a page track data structure
  *
- * This function is called when user tries to stop tracking a guest memory
- * page.
+ * This function is called before a user frees a page track data structure.
  */
-void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *p)
+void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
+		struct intel_vgpu_page_track *t)
 {
-	if (!hlist_unhashed(&p->node))
-		hash_del(&p->node);
-
-	if (p->oos_page)
-		detach_oos_page(vgpu, p->oos_page);
+	if (!hlist_unhashed(&t->node))
+		hash_del(&t->node);
 
-	if (p->writeprotection)
-		intel_gvt_hypervisor_unset_wp_page(vgpu, p);
+	if (t->tracked)
+		intel_gvt_hypervisor_disable_page_track(vgpu, t);
 }
 
 /**
- * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
+ * intel_vgpu_find_tracked_page - find a tracked guest page
  * @vgpu: a vGPU
  * @gfn: guest memory page frame number
  *
- * This function is called when emulation logic wants to know if a trapped GFN
- * is a tracked guest page.
+ * This function is called when the emulation layer wants to figure out if a
+ * trapped GFN is a tracked guest page.
  *
  * Returns:
- * Pointer to guest page data structure, NULL if failed.
+ * Pointer to page track data structure, NULL if not found.
  */
-struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
 		struct intel_vgpu *vgpu, unsigned long gfn)
 {
-	struct intel_vgpu_guest_page *p;
+	struct intel_vgpu_page_track *t;
 
-	hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
-		p, node, gfn) {
-		if (p->gfn == gfn)
-			return p;
+	hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
+			t, node, gfn) {
+		if (t->gfn == gfn)
+			return t;
 	}
 	return NULL;
 }
 
+static int init_guest_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *p,
+		unsigned long gfn,
+		int (*handler)(void *, u64, void *, int),
+		void *data)
+{
+	p->oos_page = NULL;
+	p->write_cnt = 0;
+
+	return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_oos_page *oos_page);
+
+static void clean_guest_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *p)
+{
+	if (p->oos_page)
+		detach_oos_page(vgpu, p->oos_page);
+
+	intel_vgpu_clean_page_track(vgpu, &p->track);
+}
+
 static inline int init_shadow_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_shadow_page *p, int type)
+		struct intel_vgpu_shadow_page *p, int type, bool hash)
 {
 	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 	dma_addr_t daddr;
@@ -634,8 +675,9 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
 
 	INIT_HLIST_NODE(&p->node);
 
-	p->mfn = daddr >> GTT_PAGE_SHIFT;
-	hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
+	p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
+	if (hash)
+		hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
 	return 0;
 }
 
@@ -644,7 +686,7 @@ static inline void clean_shadow_page(struct intel_vgpu *vgpu,
 {
 	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 
-	dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+	dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
 			PCI_DMA_BIDIRECTIONAL);
 
 	if (!hlist_unhashed(&p->node))
@@ -664,6 +706,9 @@ static inline struct intel_vgpu_shadow_page *find_shadow_page(
 	return NULL;
 }
 
+#define page_track_to_guest_page(ptr) \
+	container_of(ptr, struct intel_vgpu_guest_page, track)
+
 #define guest_page_to_ppgtt_spt(ptr) \
 	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
 
@@ -697,7 +742,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
 
 	clean_shadow_page(spt->vgpu, &spt->shadow_page);
-	intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
+	clean_guest_page(spt->vgpu, &spt->guest_page);
 	list_del_init(&spt->post_shadow_list);
 
 	free_spt(spt);
@@ -713,22 +758,24 @@ static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
 		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
 }
 
-static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+static int ppgtt_handle_guest_write_page_table_bytes(
+		struct intel_vgpu_guest_page *gpt,
 		u64 pa, void *p_data, int bytes);
 
-static int ppgtt_write_protection_handler(void *gp, u64 pa,
+static int ppgtt_write_protection_handler(void *data, u64 pa,
 		void *p_data, int bytes)
 {
-	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+	struct intel_vgpu_page_track *t = data;
+	struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
 	int ret;
 
 	if (bytes != 4 && bytes != 8)
 		return -EINVAL;
 
-	if (!gpt->writeprotection)
+	if (!t->tracked)
 		return -EINVAL;
 
-	ret = ppgtt_handle_guest_write_page_table_bytes(gp,
+	ret = ppgtt_handle_guest_write_page_table_bytes(p,
 		pa, p_data, bytes);
 	if (ret)
 		return ret;
@@ -762,13 +809,13 @@ retry:
 	 * TODO: guest page type may be different from shadow page type,
 	 *	 when we support PSE pages in the future.
 	 */
-	ret = init_shadow_page(vgpu, &spt->shadow_page, type);
+	ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
 	if (ret) {
 		gvt_vgpu_err("fail to initialize shadow page for spt\n");
 		goto err;
 	}
 
-	ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
+	ret = init_guest_page(vgpu, &spt->guest_page,
 			gfn, ppgtt_write_protection_handler, NULL);
 	if (ret) {
 		gvt_vgpu_err("fail to initialize guest page for spt\n");
@@ -798,7 +845,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
 	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
 
 #define pt_entries(spt) \
-	(GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
 
 #define for_each_present_guest_entry(spt, e, i) \
 	for (i = 0; i < pt_entries(spt); i++) \
@@ -856,7 +903,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	int v = atomic_read(&spt->refcount);
 
 	trace_spt_change(spt->vgpu->id, "die", spt,
-			spt->guest_page.gfn, spt->shadow_page.type);
+			spt->guest_page.track.gfn, spt->shadow_page.type);
 
 	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
 
@@ -878,7 +925,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	}
 release:
 	trace_spt_change(spt->vgpu->id, "release", spt,
-			spt->guest_page.gfn, spt->shadow_page.type);
+			spt->guest_page.track.gfn, spt->shadow_page.type);
 	ppgtt_free_shadow_page(spt);
 	return 0;
 fail:
@@ -895,6 +942,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s = NULL;
 	struct intel_vgpu_guest_page *g;
+	struct intel_vgpu_page_track *t;
 	int ret;
 
 	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
@@ -902,8 +950,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
 		goto fail;
 	}
 
-	g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
-	if (g) {
+	t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
+	if (t) {
+		g = page_track_to_guest_page(t);
 		s = guest_page_to_ppgtt_spt(g);
 		ppgtt_get_shadow_page(s);
 	} else {
@@ -915,7 +964,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
 			goto fail;
 		}
 
-		ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
+		ret = intel_gvt_hypervisor_enable_page_track(vgpu,
+				&s->guest_page.track);
 		if (ret)
 			goto fail;
 
@@ -923,7 +973,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
 		if (ret)
 			goto fail;
 
-		trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
+		trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
 			s->shadow_page.type);
 	}
 	return s;
@@ -953,7 +1003,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	int ret;
 
 	trace_spt_change(spt->vgpu->id, "born", spt,
-			spt->guest_page.gfn, spt->shadow_page.type);
+			spt->guest_page.track.gfn, spt->shadow_page.type);
 
 	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
 		for_each_present_guest_entry(spt, &ge, i) {
@@ -1078,11 +1128,11 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
 	old.type = new.type = get_entry_type(spt->guest_page_type);
 	old.val64 = new.val64 = 0;
 
-	for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
-		index++) {
+	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
+				info->gtt_entry_size_shift); index++) {
 		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
 		ops->get_entry(NULL, &new, index, true,
-			oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
+			oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);
 
 		if (old.val64 == new.val64
 			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
@@ -1132,8 +1182,9 @@ static int attach_oos_page(struct intel_vgpu *vgpu,
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ret;
 
-	ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
-		oos_page->mem, GTT_PAGE_SIZE);
+	ret = intel_gvt_hypervisor_read_gpa(vgpu,
+			gpt->track.gfn << I915_GTT_PAGE_SHIFT,
+			oos_page->mem, I915_GTT_PAGE_SIZE);
 	if (ret)
 		return ret;
 
@@ -1152,7 +1203,7 @@ static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
 {
 	int ret;
 
-	ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
+	ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
 	if (ret)
 		return ret;
 
@@ -1200,7 +1251,7 @@ static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
 			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
 
 	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
-	return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
+	return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
 }
 
 /**
@@ -1335,10 +1386,10 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
 	return 0;
 }
 
-static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+static int ppgtt_handle_guest_write_page_table_bytes(
+		struct intel_vgpu_guest_page *gpt,
 		u64 pa, void *p_data, int bytes)
 {
-	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
 	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
 	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@@ -1415,7 +1466,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
 		mm->shadow_page_table = mem + mm->page_table_entry_size;
 	} else if (mm->type == INTEL_GVT_MM_GGTT) {
 		mm->page_table_entry_cnt =
-			(gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
+			(gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
 		mm->page_table_entry_size = mm->page_table_entry_cnt *
 			info->gtt_entry_size;
 		mem = vzalloc(mm->page_table_entry_size);
@@ -1737,8 +1788,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 				gma_ops->gma_to_ggtt_pte_index(gma));
 		if (ret)
 			goto err;
-		gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
-			+ (gma & ~GTT_PAGE_MASK);
+		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+			+ (gma & ~I915_GTT_PAGE_MASK);
 
 		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
 		return gpa;
@@ -1790,8 +1841,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 		}
 	}
 
-	gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
-		+ (gma & ~GTT_PAGE_MASK);
+	gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+		+ (gma & ~I915_GTT_PAGE_MASK);
 
 	trace_gma_translate(vgpu->id, "ppgtt", 0,
 			mm->page_table_level, gma, gpa);
@@ -1859,7 +1910,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	if (bytes != 4 && bytes != 8)
 		return -EINVAL;
 
-	gma = g_gtt_index << GTT_PAGE_SHIFT;
+	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
 
 	/* the VM may configure the whole GM space when ballooning is used */
 	if (!vgpu_gmadr_is_valid(vgpu, gma))
@@ -1878,11 +1929,11 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			 * update the entry. In this situation p2m will fail,
 			 * so set the shadow entry to point to a scratch page
 			 */
-			ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
+			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 		}
 	} else {
 		m = e;
-		ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
+		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 	}
 
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
@@ -1922,7 +1973,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 {
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
-	int page_entry_num = GTT_PAGE_SIZE >>
+	int page_entry_num = I915_GTT_PAGE_SIZE >>
 				vgpu->gvt->device_info.gtt_entry_size_shift;
 	void *scratch_pt;
 	int i;
@@ -1946,7 +1997,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		return -ENOMEM;
 	}
 	gtt->scratch_pt[type].page_mfn =
-		(unsigned long)(daddr >> GTT_PAGE_SHIFT);
+		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
 	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
 	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
 			vgpu->id, type, gtt->scratch_pt[type].page_mfn);
@@ -1989,7 +2040,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
 		if (vgpu->gtt.scratch_pt[i].page != NULL) {
 			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
-					GTT_PAGE_SHIFT);
+					I915_GTT_PAGE_SHIFT);
 			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 			__free_page(vgpu->gtt.scratch_pt[i].page);
 			vgpu->gtt.scratch_pt[i].page = NULL;
@@ -2032,7 +2083,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
 	struct intel_vgpu_mm *ggtt_mm;
 
-	hash_init(gtt->guest_page_hash_table);
+	hash_init(gtt->tracked_guest_page_hash_table);
 	hash_init(gtt->shadow_page_hash_table);
 
 	INIT_LIST_HEAD(&gtt->mm_list_head);
@@ -2285,15 +2336,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		__free_page(virt_to_page(page));
 		return -ENOMEM;
 	}
-	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
-	gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
+
+	gvt->gtt.scratch_page = virt_to_page(page);
+	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
 
 	if (enable_out_of_sync) {
 		ret = setup_spt_oos(gvt);
 		if (ret) {
 			gvt_err("fail to initialize SPT oos\n");
 			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
-			__free_page(gvt->gtt.scratch_ggtt_page);
+			__free_page(gvt->gtt.scratch_page);
 			return ret;
 		}
 	}
@@ -2312,12 +2364,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
 	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
-	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
-					GTT_PAGE_SHIFT);
+	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
+					I915_GTT_PAGE_SHIFT);
 
 	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 
-	__free_page(gvt->gtt.scratch_ggtt_page);
+	__free_page(gvt->gtt.scratch_page);
 
 	if (enable_out_of_sync)
 		clean_spt_oos(gvt);
@@ -2343,7 +2395,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 
 	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
 	e.type = GTT_TYPE_GGTT_PTE;
-	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+	ops->set_pfn(&e, gvt->gtt.scratch_mfn);
 	e.val64 |= _PAGE_PRESENT;
 
 	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
@@ -2369,8 +2421,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 {
-	int i;
-
 	ppgtt_free_all_shadow_page(vgpu);
 
 	/* Shadow pages are only created when there is no page
@@ -2380,11 +2430,4 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
 
 	intel_vgpu_reset_ggtt(vgpu);
-
-	/* clear scratch page for security */
-	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
-		if (vgpu->gtt.scratch_pt[i].page != NULL)
-			memset(page_address(vgpu->gtt.scratch_pt[i].page),
-				0, PAGE_SIZE);
-	}
 }
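
For orientation, a minimal usage sketch of the split page-track API above, assuming the declarations from gtt.h below; the two sample_* functions are hypothetical and error unwinding is elided:

	static int sample_track_handler(void *data, u64 pa, void *p_data,
			int bytes)
	{
		struct intel_vgpu_page_track *t = data;

		/* called on every trapped write to the tracked page */
		gvt_dbg_mm("write hit tracked gfn 0x%lx, %d bytes\n",
			   t->gfn, bytes);
		return 0;
	}

	static int sample_track_one_page(struct intel_vgpu *vgpu,
			struct intel_vgpu_page_track *t, unsigned long gfn)
	{
		int ret;

		/* hash the track structure and remember the handler */
		ret = intel_vgpu_init_page_track(vgpu, t, gfn,
						 sample_track_handler, t);
		if (ret)
			return ret;
		/* write protection is armed separately via the MPT layer */
		return intel_gvt_hypervisor_enable_page_track(vgpu, t);
	}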

+ 19 - 23
drivers/gpu/drm/i915/gvt/gtt.h

@@ -34,9 +34,8 @@
 #ifndef _GVT_GTT_H_
 #define _GVT_GTT_H_
 
-#define GTT_PAGE_SHIFT		12
-#define GTT_PAGE_SIZE		(1UL << GTT_PAGE_SHIFT)
-#define GTT_PAGE_MASK		(~(GTT_PAGE_SIZE-1))
+#define I915_GTT_PAGE_SHIFT         12
+#define I915_GTT_PAGE_MASK		(~(I915_GTT_PAGE_SIZE - 1))
 
 struct intel_vgpu_mm;
 
@@ -63,6 +62,7 @@ struct intel_gvt_gtt_pte_ops {
 			 struct intel_vgpu *vgpu);
 	bool (*test_present)(struct intel_gvt_gtt_entry *e);
 	void (*clear_present)(struct intel_gvt_gtt_entry *e);
+	void (*set_present)(struct intel_gvt_gtt_entry *e);
 	bool (*test_pse)(struct intel_gvt_gtt_entry *e);
 	void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
 	unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
@@ -86,8 +86,8 @@ struct intel_gvt_gtt {
 	struct list_head oos_page_free_list_head;
 	struct list_head mm_lru_list_head;
 
-	struct page *scratch_ggtt_page;
-	unsigned long scratch_ggtt_mfn;
+	struct page *scratch_page;
+	unsigned long scratch_mfn;
 };
 
 enum {
@@ -193,18 +193,16 @@ struct intel_vgpu_scratch_pt {
 	unsigned long page_mfn;
 };
 
-
 struct intel_vgpu_gtt {
 	struct intel_vgpu_mm *ggtt_mm;
 	unsigned long active_ppgtt_mm_bitmap;
 	struct list_head mm_list_head;
 	DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
-	DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
-	atomic_t n_write_protected_guest_page;
+	DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+	atomic_t n_tracked_guest_page;
 	struct list_head oos_page_list_head;
 	struct list_head post_shadow_list_head;
 	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
-
 };
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
@@ -228,12 +226,16 @@ struct intel_vgpu_shadow_page {
 	unsigned long mfn;
 };
 
-struct intel_vgpu_guest_page {
+struct intel_vgpu_page_track {
 	struct hlist_node node;
-	bool writeprotection;
+	bool tracked;
 	unsigned long gfn;
 	int (*handler)(void *, u64, void *, int);
 	void *data;
+};
+
+struct intel_vgpu_guest_page {
+	struct intel_vgpu_page_track track;
 	unsigned long write_cnt;
 	struct intel_vgpu_oos_page *oos_page;
 };
@@ -243,7 +245,7 @@ struct intel_vgpu_oos_page {
 	struct list_head list;
 	struct list_head vm_list;
 	int id;
-	unsigned char mem[GTT_PAGE_SIZE];
+	unsigned char mem[I915_GTT_PAGE_SIZE];
 };
 
 #define GTT_ENTRY_NUM_IN_ONE_PAGE 512
@@ -258,22 +260,16 @@ struct intel_vgpu_ppgtt_spt {
 	struct list_head post_shadow_list;
 };
 
-int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *guest_page,
+int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
+		struct intel_vgpu_page_track *t,
 		unsigned long gfn,
 		int (*handler)(void *gp, u64, void *, int),
 		void *data);
 
-void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *guest_page);
-
-int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *guest_page);
-
-void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *guest_page);
+void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
+		struct intel_vgpu_page_track *t);
 
-struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
 		struct intel_vgpu *vgpu, unsigned long gfn);
 
 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
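
A small worked example of the renamed page macros, assuming I915_GTT_PAGE_SIZE (4KB) is picked up from i915_gem_gtt.h as in the rest of this series; the helper is purely illustrative:

	static inline void sample_gma_split(unsigned long gma)
	{
		unsigned long pfn = gma >> I915_GTT_PAGE_SHIFT;
		unsigned long off = gma & ~I915_GTT_PAGE_MASK;

		/* e.g. gma 0x1234abc -> pfn 0x1234, in-page offset 0xabc */
		gvt_dbg_mm("gma 0x%lx: pfn 0x%lx off 0x%lx\n", gma, pfn, off);
	}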

+ 140 - 8
drivers/gpu/drm/i915/gvt/gvt.c

@@ -36,6 +36,8 @@
 
 #include "i915_drv.h"
 #include "gvt.h"
+#include <linux/vfio.h>
+#include <linux/mdev.h>
 
 struct intel_gvt_host intel_gvt_host;
 
@@ -44,6 +46,129 @@ static const char * const supported_hypervisors[] = {
 	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
 };
 
+static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
+		const char *name)
+{
+	int i;
+	struct intel_vgpu_type *t;
+	const char *driver_name = dev_driver_string(
+			&gvt->dev_priv->drm.pdev->dev);
+
+	for (i = 0; i < gvt->num_types; i++) {
+		t = &gvt->types[i];
+		if (!strncmp(t->name, name + strlen(driver_name) + 1,
+			sizeof(t->name)))
+			return t;
+	}
+
+	return NULL;
+}
+
+static ssize_t available_instances_show(struct kobject *kobj,
+					struct device *dev, char *buf)
+{
+	struct intel_vgpu_type *type;
+	unsigned int num = 0;
+	void *gvt = kdev_to_i915(dev)->gvt;
+
+	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+	if (!type)
+		num = 0;
+	else
+		num = type->avail_instance;
+
+	return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+		char *buf)
+{
+	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct kobject *kobj, struct device *dev,
+		char *buf)
+{
+	struct intel_vgpu_type *type;
+	void *gvt = kdev_to_i915(dev)->gvt;
+
+	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+	if (!type)
+		return 0;
+
+	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+		       "fence: %d\nresolution: %s\n"
+		       "weight: %d\n",
+		       BYTES_TO_MB(type->low_gm_size),
+		       BYTES_TO_MB(type->high_gm_size),
+		       type->fence, vgpu_edid_str(type->resolution),
+		       type->weight);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *gvt_type_attrs[] = {
+	&mdev_type_attr_available_instances.attr,
+	&mdev_type_attr_device_api.attr,
+	&mdev_type_attr_description.attr,
+	NULL,
+};
+
+static struct attribute_group *gvt_vgpu_type_groups[] = {
+	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
+		struct attribute_group ***intel_vgpu_type_groups)
+{
+	*type_attrs = gvt_type_attrs;
+	*intel_vgpu_type_groups = gvt_vgpu_type_groups;
+	return true;
+}
+
+static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+	int i, j;
+	struct intel_vgpu_type *type;
+	struct attribute_group *group;
+
+	for (i = 0; i < gvt->num_types; i++) {
+		type = &gvt->types[i];
+
+		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+		if (WARN_ON(!group))
+			goto unwind;
+
+		group->name = type->name;
+		group->attrs = gvt_type_attrs;
+		gvt_vgpu_type_groups[i] = group;
+	}
+
+	return true;
+
+unwind:
+	for (j = 0; j < i; j++) {
+		group = gvt_vgpu_type_groups[j];
+		kfree(group);
+	}
+
+	return false;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+	int i;
+	struct attribute_group *group;
+
+	for (i = 0; i < gvt->num_types; i++) {
+		group = gvt_vgpu_type_groups[i];
+		gvt_vgpu_type_groups[i] = NULL;
+		kfree(group);
+	}
+}
+
 static const struct intel_gvt_ops intel_gvt_ops = {
 	.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
 	.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -54,6 +179,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.vgpu_reset = intel_gvt_reset_vgpu,
 	.vgpu_activate = intel_gvt_activate_vgpu,
 	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
+	.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
+	.get_gvt_attrs = intel_get_gvt_attrs,
 };
 
 /**
@@ -191,17 +318,18 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 	if (WARN_ON(!gvt))
 		return;
 
+	intel_gvt_debugfs_clean(gvt);
 	clean_service_thread(gvt);
 	intel_gvt_clean_cmd_parser(gvt);
 	intel_gvt_clean_sched_policy(gvt);
 	intel_gvt_clean_workload_scheduler(gvt);
-	intel_gvt_clean_opregion(gvt);
 	intel_gvt_clean_gtt(gvt);
 	intel_gvt_clean_irq(gvt);
 	intel_gvt_clean_mmio_info(gvt);
 	intel_gvt_free_firmware(gvt);
 
 	intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+	intel_gvt_cleanup_vgpu_type_groups(gvt);
 	intel_gvt_clean_vgpu_types(gvt);
 
 	idr_destroy(&gvt->vgpu_idr);
@@ -268,13 +396,9 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto out_clean_irq;
 
-	ret = intel_gvt_init_opregion(gvt);
-	if (ret)
-		goto out_clean_gtt;
-
 	ret = intel_gvt_init_workload_scheduler(gvt);
 	if (ret)
-		goto out_clean_opregion;
+		goto out_clean_gtt;
 
 	ret = intel_gvt_init_sched_policy(gvt);
 	if (ret)
@@ -292,6 +416,12 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto out_clean_thread;
 
+	if (!intel_gvt_init_vgpu_type_groups(gvt)) {
+		gvt_err("failed to init vgpu type groups\n");
+		ret = -EFAULT;
+		goto out_clean_types;
+	}
+
 	ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
 				&intel_gvt_ops);
 	if (ret) {
@@ -307,6 +437,10 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	}
 	gvt->idle_vgpu = vgpu;
 
+	ret = intel_gvt_debugfs_init(gvt);
+	if (ret)
+		gvt_err("debugfs registeration failed, go on.\n");
+
 	gvt_dbg_core("gvt device initialization is done\n");
 	dev_priv->gvt = gvt;
 	return 0;
@@ -321,8 +455,6 @@ out_clean_sched_policy:
 	intel_gvt_clean_sched_policy(gvt);
 out_clean_workload_scheduler:
 	intel_gvt_clean_workload_scheduler(gvt);
-out_clean_opregion:
-	intel_gvt_clean_opregion(gvt);
 out_clean_gtt:
 	intel_gvt_clean_gtt(gvt);
 out_clean_irq:
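
Once kvmgt registers these shared attributes (see the kvmgt.c hunk below), they surface through the standard mdev sysfs ABI; roughly, with an illustrative device path and type name:

	/*
	 * /sys/bus/pci/devices/0000:00:02.0/mdev_supported_types/
	 *     i915-GVTg_V5_4/available_instances  -> e.g. "7"
	 *     i915-GVTg_V5_4/device_api           -> "vfio-pci"
	 *     i915-GVTg_V5_4/description          -> GM sizes, fence, resolution, weight
	 */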

+ 50 - 21
drivers/gpu/drm/i915/gvt/gvt.h

@@ -125,7 +125,6 @@ struct intel_vgpu_irq {
 struct intel_vgpu_opregion {
 	void *va;
 	u32 gfn[INTEL_GVT_OPREGION_PAGES];
-	struct page *pages[INTEL_GVT_OPREGION_PAGES];
 };
 
 #define vgpu_opregion(vgpu) (&(vgpu->opregion))
@@ -142,6 +141,33 @@ struct vgpu_sched_ctl {
 	int weight;
 };
 
+enum {
+	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
+	INTEL_VGPU_GUC_SUBMISSION,
+};
+
+struct intel_vgpu_submission_ops {
+	const char *name;
+	int (*init)(struct intel_vgpu *vgpu);
+	void (*clean)(struct intel_vgpu *vgpu);
+	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+};
+
+struct intel_vgpu_submission {
+	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
+	struct list_head workload_q_head[I915_NUM_ENGINES];
+	struct kmem_cache *workloads;
+	atomic_t running_workload_num;
+	struct i915_gem_context *shadow_ctx;
+	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
+	void *ring_scan_buffer[I915_NUM_ENGINES];
+	int ring_scan_buffer_size[I915_NUM_ENGINES];
+	const struct intel_vgpu_submission_ops *ops;
+	int virtual_submission_interface;
+	bool active;
+};
+
 struct intel_vgpu {
 	struct intel_gvt *gvt;
 	int id;
@@ -161,16 +187,10 @@ struct intel_vgpu {
 	struct intel_vgpu_gtt gtt;
 	struct intel_vgpu_opregion opregion;
 	struct intel_vgpu_display display;
-	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
-	struct list_head workload_q_head[I915_NUM_ENGINES];
-	struct kmem_cache *workloads;
-	atomic_t running_workload_num;
-	/* 1/2K for each reserve ring buffer */
-	void *reserve_ring_buffer_va[I915_NUM_ENGINES];
-	int reserve_ring_buffer_size[I915_NUM_ENGINES];
-	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
-	struct i915_gem_context *shadow_ctx;
-	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+	struct intel_vgpu_submission submission;
+	u32 hws_pga[I915_NUM_ENGINES];
+
+	struct dentry *debugfs;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	struct {
@@ -190,6 +210,10 @@ struct intel_vgpu {
 #endif
 };
 
+/* validate VM health status */
+#define vgpu_is_vm_unhealthy(ret_val) \
+	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
+
 struct intel_gvt_gm {
 	unsigned long vgpu_allocated_low_gm_size;
 	unsigned long vgpu_allocated_high_gm_size;
@@ -231,7 +255,7 @@ struct intel_gvt_mmio {
 	unsigned int num_mmio_block;
 
 	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
-	unsigned int num_tracked_mmio;
+	unsigned long num_tracked_mmio;
 };
 
 struct intel_gvt_firmware {
@@ -240,11 +264,6 @@ struct intel_gvt_firmware {
 	bool firmware_loaded;
 };
 
-struct intel_gvt_opregion {
-	void *opregion_va;
-	u32 opregion_pa;
-};
-
 #define NR_MAX_INTEL_VGPU_TYPES 20
 struct intel_vgpu_type {
 	char name[16];
@@ -268,7 +287,6 @@ struct intel_gvt {
 	struct intel_gvt_firmware firmware;
 	struct intel_gvt_irq irq;
 	struct intel_gvt_gtt gtt;
-	struct intel_gvt_opregion opregion;
 	struct intel_gvt_workload_scheduler scheduler;
 	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
 	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
@@ -279,6 +297,8 @@ struct intel_gvt {
 	struct task_struct *service_thread;
 	wait_queue_head_t service_thread_wq;
 	unsigned long service_request;
+
+	struct dentry *debugfs_root;
 };
 
 static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
@@ -484,9 +504,6 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
 			PCI_BASE_ADDRESS_MEM_MASK;
 }
 
-void intel_gvt_clean_opregion(struct intel_gvt *gvt);
-int intel_gvt_init_opregion(struct intel_gvt *gvt);
-
 void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
 
@@ -494,6 +511,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
 
 struct intel_gvt_ops {
 	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
@@ -510,12 +528,17 @@ struct intel_gvt_ops {
 	void (*vgpu_reset)(struct intel_vgpu *);
 	void (*vgpu_activate)(struct intel_vgpu *);
 	void (*vgpu_deactivate)(struct intel_vgpu *);
+	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
+			const char *name);
+	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
+			struct attribute_group ***intel_vgpu_type_groups);
 };
 
 
 enum {
 	GVT_FAILSAFE_UNSUPPORTED_GUEST,
 	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
+	GVT_FAILSAFE_GUEST_ERR,
 };
 
 static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
@@ -591,6 +614,12 @@ static inline bool intel_gvt_mmio_has_mode_mask(
 	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
 }
 
+int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
+int intel_gvt_debugfs_init(struct intel_gvt *gvt);
+void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
+
 #include "trace.h"
 #include "mpt.h"
 

+ 120 - 19
drivers/gpu/drm/i915/gvt/handlers.c

@@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt,
 	return 0;
 }
 
-static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+/**
+ * intel_gvt_render_mmio_to_ring_id - convert an MMIO offset into a ring id
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * Ring ID on success, negative error code if failed.
+ */
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+		unsigned int offset)
 {
 	enum intel_engine_id id;
 	struct intel_engine_cs *engine;
 
-	reg &= ~GENMASK(11, 0);
+	offset &= ~GENMASK(11, 0);
 	for_each_engine(engine, gvt->dev_priv, id) {
-		if (engine->mmio_base == reg)
+		if (engine->mmio_base == offset)
 			return id;
 	}
-	return -1;
+	return -ENODEV;
 }
 
 #define offset_to_fence_num(offset) \
@@ -157,7 +166,7 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
 	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
 
 
-static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
 {
 	switch (reason) {
 	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
@@ -165,6 +174,8 @@ static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
 		break;
 	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
 		pr_err("Graphics resource is not enough for the guest\n");
+	case GVT_FAILSAFE_GUEST_ERR:
+		pr_err("GVT Internal error  for the guest\n");
 	default:
 		break;
 	}
@@ -1369,6 +1380,34 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
 }
 
+static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 value = *(u32 *)p_data;
+	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+
+	if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
+		gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n",
+			      vgpu->id, offset, value);
+		return -EINVAL;
+	}
+	/*
+	 * Need to emulate all HWSP register writes so the host can
+	 * update the VM CSB status correctly. The registers listed here
+	 * cover BDW, SKL and other platforms with the same HWSP layout.
+	 */
+	if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
+		gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
+			     vgpu->id, offset);
+		return -EINVAL;
+	}
+	vgpu->hws_pga[ring_id] = value;
+	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
+		     vgpu->id, value, offset);
+
+	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
+}
+
 static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -1432,18 +1471,36 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int ring_id;
+	u32 ring_base;
+
+	ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
+	/*
+	 * Read the HW register in the following cases:
+	 * a. the offset isn't a ring MMIO
+	 * b. the offset's ring is currently running on the HW
+	 * c. the offset is the ring timestamp MMIO
+	 */
+	if (ring_id >= 0)
+		ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+	if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
+	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
+	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
+		mmio_hw_access_pre(dev_priv);
+		vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+		mmio_hw_access_post(dev_priv);
+	}
 
-	mmio_hw_access_pre(dev_priv);
-	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
-	mmio_hw_access_post(dev_priv);
 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 }
 
 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	struct intel_vgpu_execlist *execlist;
 	u32 data = *(u32 *)p_data;
 	int ret = 0;
@@ -1451,9 +1508,9 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
 		return -EINVAL;
 
-	execlist = &vgpu->execlist[ring_id];
+	execlist = &vgpu->submission.execlist[ring_id];
 
-	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
+	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
 	if (execlist->elsp_dwords.index == 3) {
 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
 		if(ret)
@@ -1469,9 +1526,11 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	u32 data = *(u32 *)p_data;
-	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	bool enable_execlist;
+	int ret;
 
 	write_vreg(vgpu, offset, p_data, bytes);
 
@@ -1493,8 +1552,18 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 				(enable_execlist ? "enabling" : "disabling"),
 				ring_id);
 
-		if (enable_execlist)
-			intel_vgpu_start_schedule(vgpu);
+		if (!enable_execlist)
+			return 0;
+
+		if (s->active)
+			return 0;
+
+		ret = intel_vgpu_select_submission_ops(vgpu,
+				INTEL_VGPU_EXECLIST_SUBMISSION);
+		if (ret)
+			return ret;
+
+		intel_vgpu_start_schedule(vgpu);
 	}
 	return 0;
 }
@@ -1526,7 +1595,7 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
 	default:
 		return -EINVAL;
 	}
-	set_bit(id, (void *)vgpu->tlb_handle_pending);
+	set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
 
 	return 0;
 }
@@ -2512,7 +2581,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
-	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
 
 	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
@@ -2914,14 +2983,46 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 	gvt->mmio.mmio_block = mmio_blocks;
 	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
 
-	gvt_dbg_mmio("traced %u virtual mmio registers\n",
-		     gvt->mmio.num_tracked_mmio);
 	return 0;
 err:
 	intel_gvt_clean_mmio_info(gvt);
 	return ret;
 }
 
+/**
+ * intel_gvt_for_each_tracked_mmio - iterate over each tracked MMIO
+ * @gvt: a GVT device
+ * @handler: callback invoked for each tracked MMIO offset
+ * @data: private data given to the handler
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
+	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
+	void *data)
+{
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	struct intel_gvt_mmio_info *e;
+	int i, j, ret;
+
+	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
+		ret = handler(gvt, e->offset, data);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+		for (j = 0; j < block->size; j += 4) {
+			ret = handler(gvt,
+				INTEL_GVT_MMIO_OFFSET(block->offset) + j,
+				data);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
 
 /**
  * intel_vgpu_default_mmio_read - default MMIO read handler

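A usage sketch for the iterator just added, mirroring what a debugfs consumer is expected to do; both sample_* names are hypothetical:

	static int sample_count_mmio(struct intel_gvt *gvt, u32 offset,
			void *data)
	{
		unsigned long *count = data;

		(*count)++;
		return 0;	/* a non-zero return aborts the walk */
	}

	static unsigned long sample_num_tracked_mmio(struct intel_gvt *gvt)
	{
		unsigned long count = 0;

		intel_gvt_for_each_tracked_mmio(gvt, sample_count_mmio, &count);
		return count;
	}
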
+ 9 - 121
drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -248,120 +248,6 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 	}
 }
 
-static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
-		const char *name)
-{
-	int i;
-	struct intel_vgpu_type *t;
-	const char *driver_name = dev_driver_string(
-			&gvt->dev_priv->drm.pdev->dev);
-
-	for (i = 0; i < gvt->num_types; i++) {
-		t = &gvt->types[i];
-		if (!strncmp(t->name, name + strlen(driver_name) + 1,
-			sizeof(t->name)))
-			return t;
-	}
-
-	return NULL;
-}
-
-static ssize_t available_instances_show(struct kobject *kobj,
-					struct device *dev, char *buf)
-{
-	struct intel_vgpu_type *type;
-	unsigned int num = 0;
-	void *gvt = kdev_to_i915(dev)->gvt;
-
-	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
-	if (!type)
-		num = 0;
-	else
-		num = type->avail_instance;
-
-	return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
-		char *buf)
-{
-	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct kobject *kobj, struct device *dev,
-		char *buf)
-{
-	struct intel_vgpu_type *type;
-	void *gvt = kdev_to_i915(dev)->gvt;
-
-	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
-	if (!type)
-		return 0;
-
-	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
-		       "fence: %d\nresolution: %s\n"
-		       "weight: %d\n",
-		       BYTES_TO_MB(type->low_gm_size),
-		       BYTES_TO_MB(type->high_gm_size),
-		       type->fence, vgpu_edid_str(type->resolution),
-		       type->weight);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-
-static struct attribute *type_attrs[] = {
-	&mdev_type_attr_available_instances.attr,
-	&mdev_type_attr_device_api.attr,
-	&mdev_type_attr_description.attr,
-	NULL,
-};
-
-static struct attribute_group *intel_vgpu_type_groups[] = {
-	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
-	int i, j;
-	struct intel_vgpu_type *type;
-	struct attribute_group *group;
-
-	for (i = 0; i < gvt->num_types; i++) {
-		type = &gvt->types[i];
-
-		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
-		if (WARN_ON(!group))
-			goto unwind;
-
-		group->name = type->name;
-		group->attrs = type_attrs;
-		intel_vgpu_type_groups[i] = group;
-	}
-
-	return true;
-
-unwind:
-	for (j = 0; j < i; j++) {
-		group = intel_vgpu_type_groups[j];
-		kfree(group);
-	}
-
-	return false;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
-	int i;
-	struct attribute_group *group;
-
-	for (i = 0; i < gvt->num_types; i++) {
-		group = intel_vgpu_type_groups[i];
-		kfree(group);
-	}
-}
-
 static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
 {
 	hash_init(info->ptable);
@@ -441,7 +327,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	pdev = mdev_parent_dev(mdev);
 	gvt = kdev_to_i915(pdev)->gvt;
 
-	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
 	if (!type) {
 		gvt_vgpu_err("failed to find type %s to create\n",
 						kobject_name(kobj));
@@ -1188,7 +1074,7 @@ hw_id_show(struct device *dev, struct device_attribute *attr,
 		struct intel_vgpu *vgpu = (struct intel_vgpu *)
 			mdev_get_drvdata(mdev);
 		return sprintf(buf, "%u\n",
-			       vgpu->shadow_ctx->hw_id);
+			       vgpu->submission.shadow_ctx->hw_id);
 	}
 	return sprintf(buf, "\n");
 }
@@ -1212,8 +1098,7 @@ static const struct attribute_group *intel_vgpu_groups[] = {
 	NULL,
 };
 
-static const struct mdev_parent_ops intel_vgpu_ops = {
-	.supported_type_groups	= intel_vgpu_type_groups,
+static struct mdev_parent_ops intel_vgpu_ops = {
 	.mdev_attr_groups       = intel_vgpu_groups,
 	.create			= intel_vgpu_create,
 	.remove			= intel_vgpu_remove,
@@ -1229,17 +1114,20 @@ static const struct mdev_parent_ops intel_vgpu_ops = {
 
 static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
 {
-	if (!intel_gvt_init_vgpu_type_groups(gvt))
-		return -EFAULT;
+	struct attribute **kvm_type_attrs;
+	struct attribute_group **kvm_vgpu_type_groups;
 
 	intel_gvt_ops = ops;
+	if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
+			&kvm_vgpu_type_groups))
+		return -EFAULT;
+	intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
 
 	return mdev_register_device(dev, &intel_vgpu_ops);
 }
 
 static void kvmgt_host_exit(struct device *dev, void *gvt)
 {
-	intel_gvt_cleanup_vgpu_type_groups(gvt);
 	mdev_unregister_device(dev);
 }
 

+ 16 - 16
drivers/gpu/drm/i915/gvt/mmio.c

@@ -117,18 +117,18 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		else
 			memcpy(pt, p_data, bytes);
 
-	} else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
+	} else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
 
 		/* Since we enter the failsafe mode early during guest boot,
 		 * the guest may not have had a chance to set up its ppgtt
 		 * table, so there should not be any wp pages for it. Keep
 		 * the wp-related code here in case we need it in the future.
 		 */
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
 			/* remove write protection to prevent future traps */
-			intel_vgpu_clean_guest_page(vgpu, gp);
+			intel_vgpu_clean_page_track(vgpu, t);
 			if (read)
 				intel_gvt_hypervisor_read_gpa(vgpu, pa,
 						p_data, bytes);
@@ -170,17 +170,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		return ret;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
 
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
 			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
 					p_data, bytes);
 			if (ret) {
 				gvt_vgpu_err("guest page read error %d, "
 					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-					ret, gp->gfn, pa, *(u32 *)p_data,
+					ret, t->gfn, pa, *(u32 *)p_data,
 					bytes);
 			}
 			mutex_unlock(&gvt->lock);
@@ -267,17 +267,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		return ret;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
 
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
-			ret = gp->handler(gp, pa, p_data, bytes);
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
+			ret = t->handler(t, pa, p_data, bytes);
 			if (ret) {
 				gvt_err("guest page write error %d, "
 					"gfn 0x%lx, pa 0x%llx, "
 					"var 0x%x, len %d\n",
-					ret, gp->gfn, pa,
+					ret, t->gfn, pa,
 					*(u32 *)p_data, bytes);
 			}
 			mutex_unlock(&gvt->lock);
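
Note the cookie change above: t->handler() now receives the page-track structure itself, so a GTT-side handler recovers its enclosing guest page with container_of(), as ppgtt_write_protection_handler() in gtt.c does:

	struct intel_vgpu_page_track *t = data;
	struct intel_vgpu_guest_page *p =
		container_of(t, struct intel_vgpu_guest_page, track);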

+ 6 - 0
drivers/gpu/drm/i915/gvt/mmio.h

@@ -65,11 +65,17 @@ struct intel_gvt_mmio_info {
 	struct hlist_node node;
 };
 
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+		unsigned int offset);
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
 bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
 
 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
+int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
+	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
+	void *data);
 
 #define INTEL_GVT_MMIO_OFFSET(reg) ({ \
 	typeof(reg) __reg = reg; \

+ 18 - 16
drivers/gpu/drm/i915/gvt/mpt.h

@@ -154,51 +154,53 @@ static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
 }
 
 /**
- * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
+ * intel_gvt_hypervisor_enable_page_track - set a guest page to write-protected
  * @vgpu: a vGPU
- * @p: intel_vgpu_guest_page
+ * @t: page track data structure
  *
  * Returns:
  * Zero on success, negative error code if failed.
  */
-static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *p)
+static inline int intel_gvt_hypervisor_enable_page_track(
+		struct intel_vgpu *vgpu,
+		struct intel_vgpu_page_track *t)
 {
 	int ret;
 
-	if (p->writeprotection)
+	if (t->tracked)
 		return 0;
 
-	ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
+	ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, t->gfn);
 	if (ret)
 		return ret;
-	p->writeprotection = true;
-	atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
+	t->tracked = true;
+	atomic_inc(&vgpu->gtt.n_tracked_guest_page);
 	return 0;
 }
 
 /**
- * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
+ * intel_gvt_hypervisor_disable_page_track - remove the write-protection of a
  * guest page
  * @vgpu: a vGPU
- * @p: intel_vgpu_guest_page
+ * @t: page track data structure
  *
  * Returns:
  * Zero on success, negative error code if failed.
  */
-static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *p)
+static inline int intel_gvt_hypervisor_disable_page_track(
+		struct intel_vgpu *vgpu,
+		struct intel_vgpu_page_track *t)
 {
 	int ret;
 
-	if (!p->writeprotection)
+	if (!t->tracked)
 		return 0;
 
-	ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
+	ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, t->gfn);
 	if (ret)
 		return ret;
-	p->writeprotection = false;
-	atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
+	t->tracked = false;
+	atomic_dec(&vgpu->gtt.n_tracked_guest_page);
 	return 0;
 }
 

+ 226 - 49
drivers/gpu/drm/i915/gvt/opregion.c

@@ -25,36 +25,247 @@
 #include "i915_drv.h"
 #include "gvt.h"
 
-static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
+/*
+ * Note: this is only for GVT-g virtual VBT generation; other code
+ * must not copy this approach.
+ */
+#define _INTEL_BIOS_PRIVATE
+#include "intel_vbt_defs.h"
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_VBT      (1<<3)
+
+/* device handle */
+#define DEVICE_TYPE_CRT    0x01
+#define DEVICE_TYPE_EFP1   0x04
+#define DEVICE_TYPE_EFP2   0x40
+#define DEVICE_TYPE_EFP3   0x20
+#define DEVICE_TYPE_EFP4   0x10
+
+#define DEV_SIZE	38
+
+struct opregion_header {
+	u8 signature[16];
+	u32 size;
+	u32 opregion_ver;
+	u8 bios_ver[32];
+	u8 vbios_ver[16];
+	u8 driver_ver[16];
+	u32 mboxes;
+	u32 driver_model;
+	u32 pcon;
+	u8 dver[32];
+	u8 rsvd[124];
+} __packed;
+
+struct bdb_data_header {
+	u8 id;
+	u16 size; /* data size */
+} __packed;
+
+struct efp_child_device_config {
+	u16 handle;
+	u16 device_type;
+	u16 device_class;
+	u8 i2c_speed;
+	u8 dp_onboard_redriver; /* 158 */
+	u8 dp_ondock_redriver; /* 158 */
+	u8 hdmi_level_shifter_value:4; /* 169 */
+	u8 hdmi_max_data_rate:4; /* 204 */
+	u16 dtd_buf_ptr; /* 161 */
+	u8 edidless_efp:1; /* 161 */
+	u8 compression_enable:1; /* 198 */
+	u8 compression_method:1; /* 198 */
+	u8 ganged_edp:1; /* 202 */
+	u8 skip0:4;
+	u8 compression_structure_index:4; /* 198 */
+	u8 skip1:4;
+	u8 slave_port; /*  202 */
+	u8 skip2;
+	u8 dvo_port;
+	u8 i2c_pin; /* for add-in card */
+	u8 slave_addr; /* for add-in card */
+	u8 ddc_pin;
+	u16 edid_ptr;
+	u8 dvo_config;
+	u8 efp_docked_port:1; /* 158 */
+	u8 lane_reversal:1; /* 184 */
+	u8 onboard_lspcon:1; /* 192 */
+	u8 iboost_enable:1; /* 196 */
+	u8 hpd_invert:1; /* BXT 196 */
+	u8 slip3:3;
+	u8 hdmi_compat:1;
+	u8 dp_compat:1;
+	u8 tmds_compat:1;
+	u8 skip4:5;
+	u8 aux_channel;
+	u8 dongle_detect;
+	u8 pipe_cap:2;
+	u8 sdvo_stall:1; /* 158 */
+	u8 hpd_status:2;
+	u8 integrated_encoder:1;
+	u8 skip5:2;
+	u8 dvo_wiring;
+	u8 mipi_bridge_type; /* 171 */
+	u16 device_class_ext;
+	u8 dvo_function;
+	u8 dp_usb_type_c:1; /* 195 */
+	u8 skip6:7;
+	u8 dp_usb_type_c_2x_gpio_index; /* 195 */
+	u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
+	u8 iboost_dp:4; /* 196 */
+	u8 iboost_hdmi:4; /* 196 */
+} __packed;
+
+struct vbt {
+	/* header->bdb_offset point to bdb_header offset */
+	struct vbt_header header;
+	struct bdb_header bdb_header;
+
+	struct bdb_data_header general_features_header;
+	struct bdb_general_features general_features;
+
+	struct bdb_data_header general_definitions_header;
+	struct bdb_general_definitions general_definitions;
+
+	struct efp_child_device_config child0;
+	struct efp_child_device_config child1;
+	struct efp_child_device_config child2;
+	struct efp_child_device_config child3;
+
+	struct bdb_data_header driver_features_header;
+	struct bdb_driver_features driver_features;
+};
+
+static void virt_vbt_generation(struct vbt *v)
 {
-	u8 *buf;
-	int i;
+	int num_child;
+
+	memset(v, 0, sizeof(struct vbt));
+
+	v->header.signature[0] = '$';
+	v->header.signature[1] = 'V';
+	v->header.signature[2] = 'B';
+	v->header.signature[3] = 'T';
+
+	/* there are features depending on the version! */
+	v->header.version = 155;
+	v->header.header_size = sizeof(v->header);
+	v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
+	v->header.bdb_offset = offsetof(struct vbt, bdb_header);
+
+	strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
+	v->bdb_header.version = 186; /* child_dev_size = 38 */
+	v->bdb_header.header_size = sizeof(v->bdb_header);
+
+	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
+		- sizeof(struct bdb_header);
+
+	/* general features */
+	v->general_features_header.id = BDB_GENERAL_FEATURES;
+	v->general_features_header.size = sizeof(struct bdb_general_features);
+	v->general_features.int_crt_support = 0;
+	v->general_features.int_tv_support = 0;
+
+	/* child device */
+	num_child = 4; /* each port has one child */
+	v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
+	/* size will include child devices */
+	v->general_definitions_header.size =
+		sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
+	v->general_definitions.child_dev_size = DEV_SIZE;
+
+	/* portA */
+	v->child0.handle = DEVICE_TYPE_EFP1;
+	v->child0.device_type = DEVICE_TYPE_DP;
+	v->child0.dvo_port = DVO_PORT_DPA;
+	v->child0.aux_channel = DP_AUX_A;
+	v->child0.dp_compat = true;
+	v->child0.integrated_encoder = true;
+
+	/* portB */
+	v->child1.handle = DEVICE_TYPE_EFP2;
+	v->child1.device_type = DEVICE_TYPE_DP;
+	v->child1.dvo_port = DVO_PORT_DPB;
+	v->child1.aux_channel = DP_AUX_B;
+	v->child1.dp_compat = true;
+	v->child1.integrated_encoder = true;
+
+	/* portC */
+	v->child2.handle = DEVICE_TYPE_EFP3;
+	v->child2.device_type = DEVICE_TYPE_DP;
+	v->child2.dvo_port = DVO_PORT_DPC;
+	v->child2.aux_channel = DP_AUX_C;
+	v->child2.dp_compat = true;
+	v->child2.integrated_encoder = true;
+
+	/* portD */
+	v->child3.handle = DEVICE_TYPE_EFP4;
+	v->child3.device_type = DEVICE_TYPE_DP;
+	v->child3.dvo_port = DVO_PORT_DPD;
+	v->child3.aux_channel = DP_AUX_D;
+	v->child3.dp_compat = true;
+	v->child3.integrated_encoder = true;
+
+	/* driver features */
+	v->driver_features_header.id = BDB_DRIVER_FEATURES;
+	v->driver_features_header.size = sizeof(struct bdb_driver_features);
+	v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
+}
 
-	if (WARN((vgpu_opregion(vgpu)->va),
-			"vgpu%d: opregion has been initialized already.\n",
-			vgpu->id))
-		return -EINVAL;
+static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
+{
+	u8 *buf;
+	struct opregion_header *header;
+	struct vbt v;
 
+	gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
 	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
 			__GFP_ZERO,
 			get_order(INTEL_GVT_OPREGION_SIZE));
-
-	if (!vgpu_opregion(vgpu)->va)
+	if (!vgpu_opregion(vgpu)->va) {
+		gvt_err("fail to get memory for vgpu virt opregion\n");
 		return -ENOMEM;
+	}
 
-	memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
-	       INTEL_GVT_OPREGION_SIZE);
-
-	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
-		vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+	/* emulated opregion with VBT mailbox only */
+	buf = (u8 *)vgpu_opregion(vgpu)->va;
+	header = (struct opregion_header *)buf;
+	/* don't copy the trailing NUL: the signature field is 16 bytes */
+	memcpy(header->signature, OPREGION_SIGNATURE,
+			sizeof(OPREGION_SIGNATURE) - 1);
+	header->size = 0x8;
+	header->opregion_ver = 0x02000000;
+	header->mboxes = MBOX_VBT;
 
 	/* for an unknown reason, the value in the LID field is incorrect,
 	 * which blocks the Windows guest, so work around it by forcing
 	 * it to "OPEN"
 	 */
-	buf = (u8 *)vgpu_opregion(vgpu)->va;
 	buf[INTEL_GVT_OPREGION_CLID] = 0x3;
 
+	/* emulated vbt from virt vbt generation */
+	virt_vbt_generation(&v);
+	memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt));
+
+	return 0;
+}
+
+static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+	int i, ret;
+
+	if (WARN((vgpu_opregion(vgpu)->va),
+			"vgpu%d: opregion has been initialized already.\n",
+			vgpu->id))
+		return -EINVAL;
+
+	ret = alloc_and_init_virt_opregion(vgpu);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+		vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+
 	return 0;
 }
 
@@ -132,40 +343,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
 	return 0;
 }
 
-/**
- * intel_gvt_clean_opregion - clean host opergion related stuffs
- * @gvt: a GVT device
- *
- */
-void intel_gvt_clean_opregion(struct intel_gvt *gvt)
-{
-	memunmap(gvt->opregion.opregion_va);
-	gvt->opregion.opregion_va = NULL;
-}
-
-/**
- * intel_gvt_init_opregion - initialize host opergion related stuffs
- * @gvt: a GVT device
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-int intel_gvt_init_opregion(struct intel_gvt *gvt)
-{
-	gvt_dbg_core("init host opregion\n");
-
-	pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
-			&gvt->opregion.opregion_pa);
-
-	gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
-					     INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
-	if (!gvt->opregion.opregion_va) {
-		gvt_err("fail to map host opregion\n");
-		return -EFAULT;
-	}
-	return 0;
-}
-
 #define GVT_OPREGION_FUNC(scic)					\
 	({							\
 	 u32 __ret;						\

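For orientation, the resulting layout of the fully virtualized opregion built by alloc_and_init_virt_opregion() above (offsets come from reg.h; the host opregion is no longer mapped at all):

	buf + 0x0                               opregion_header, MBOX_VBT only
	buf + INTEL_GVT_OPREGION_CLID           forced to 0x3 ("lid open")
	buf + INTEL_GVT_OPREGION_VBT_OFFSET     virtual VBT from virt_vbt_generation()
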
+ 5 - 1
drivers/gpu/drm/i915/gvt/reg.h

@@ -51,6 +51,9 @@
 
 #define INTEL_GVT_OPREGION_PAGES	2
 #define INTEL_GVT_OPREGION_SIZE		(INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
+#define INTEL_GVT_OPREGION_VBT_OFFSET	0x400
+#define INTEL_GVT_OPREGION_VBT_SIZE	\
+		(INTEL_GVT_OPREGION_SIZE - INTEL_GVT_OPREGION_VBT_OFFSET)
 
 #define VGT_SPRSTRIDE(pipe)	_PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
 
@@ -71,6 +74,7 @@
 #define RB_HEAD_OFF_MASK	((1U << 21) - (1U << 2))
 #define RB_TAIL_OFF_MASK	((1U << 21) - (1U << 3))
 #define RB_TAIL_SIZE_MASK	((1U << 21) - (1U << 12))
-#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
+#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \
+		I915_GTT_PAGE_SIZE)
 
 #endif
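
A quick sanity check of the reworked ring-buffer size macro, assuming I915_GTT_PAGE_SIZE is the usual 4KB; the helper exists only to host the compile-time assertion:

	static inline void sample_ring_ctl_size_check(void)
	{
		/* size bits of 0x3000 decode to 0x3000 + 0x1000 = 16KB */
		BUILD_BUG_ON(_RING_CTL_BUF_SIZE(0x3000) !=
			     4 * I915_GTT_PAGE_SIZE);
	}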

+ 8 - 6
drivers/gpu/drm/i915/gvt/render.c

@@ -147,6 +147,7 @@ static u32 gen9_render_mocs_L3[32];
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	enum forcewake_domains fw;
 	i915_reg_t reg;
 	u32 regs[] = {
@@ -160,7 +161,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
+	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
 		return;
 
 	reg = _MMIO(regs[ring_id]);
@@ -208,7 +209,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
 		gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
-		I915_WRITE(offset, vgpu_vreg(vgpu, offset));
+		I915_WRITE_FW(offset, vgpu_vreg(vgpu, offset));
 		offset.reg += 4;
 	}
 
@@ -261,14 +262,15 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct render_mmio *mmio;
-	u32 v;
-	int i, array_size;
-	u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	u32 *reg_state = s->shadow_ctx->engine[ring_id].lrc_reg_state;
 	u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 	i915_reg_t last_reg = _MMIO(0);
+	struct render_mmio *mmio;
+	u32 v;
+	int i, array_size;
 
 	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
 		|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {

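Note: the switch from I915_WRITE() to I915_WRITE_FW() in load_mocs() above matters because the MOCS loop already runs with forcewake held — the plain accessor re-checks and re-takes forcewake on every register, while the _FW variant trusts the caller. A rough userspace analogy of the two accessor flavors; every name below is a stand-in, not the i915 API:

	#include <stdio.h>

	static int fw_refcount;

	static void forcewake_get(void) { fw_refcount++; }
	static void forcewake_put(void) { fw_refcount--; }

	/* checked accessor: wraps every access in its own get/put */
	static void mmio_write(unsigned int reg, unsigned int val)
	{
		forcewake_get();
		printf("checked write %#x <- %#x\n", reg, val);
		forcewake_put();
	}

	/* raw accessor: assumes the caller already holds forcewake */
	static void mmio_write_fw(unsigned int reg, unsigned int val)
	{
		printf("raw write %#x <- %#x (fw refcount %d)\n", reg, val,
		       fw_refcount);
	}

	int main(void)
	{
		unsigned int reg = 0xc800;	/* hypothetical MOCS register base */
		unsigned int i;

		mmio_write(reg, 0);		/* one get/put per access */

		forcewake_get();		/* one get for the whole batch */
		for (i = 1; i < 4; i++)
			mmio_write_fw(reg + i * 4, i);
		forcewake_put();
		return 0;
	}
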
+ 644 - 43
drivers/gpu/drm/i915/gvt/scheduler.c

@@ -57,7 +57,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
 		shadow_ctx->engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
@@ -81,16 +81,16 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	while (i < context_page_num) {
 		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
 				(u32)((workload->ctx_desc.lrca + i) <<
-				GTT_PAGE_SHIFT));
+				I915_GTT_PAGE_SHIFT));
 		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
 			gvt_vgpu_err("Invalid guest context descriptor\n");
-			return -EINVAL;
+			return -EFAULT;
 		}
 
 		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
 		dst = kmap(page);
 		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-				GTT_PAGE_SIZE);
+				I915_GTT_PAGE_SIZE);
 		kunmap(page);
 		i++;
 	}
@@ -120,7 +120,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			sizeof(*shadow_ring_context),
 			(void *)shadow_ring_context +
 			sizeof(*shadow_ring_context),
-			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
 	kunmap(page);
 	return 0;
@@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
 	return i915_gem_context_force_single_submission(req->ctx);
 }
 
+static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+	i915_reg_t reg;
+
+	reg = RING_INSTDONE(ring_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+	reg = RING_ACTHD(ring_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+	reg = RING_ACTHD_UDW(ring_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+}
+
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
@@ -175,6 +189,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
 	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+		save_ring_hw_state(workload->vgpu, ring_id);
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default:
@@ -249,12 +264,12 @@ void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
  */
 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
-	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct drm_i915_gem_request *rq;
-	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_ring *ring;
 	int ret;
 
@@ -267,7 +282,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
 				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-	if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
+	if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
 		shadow_context_descriptor_update(shadow_ctx,
 					dev_priv->engine[ring_id]);
 
@@ -299,6 +314,27 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = populate_shadow_context(workload);
 	if (ret)
 		goto err_unpin;
+	workload->shadowed = true;
+	return 0;
+
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+	return ret;
+}
+
+static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct drm_i915_gem_request *rq;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	int ret;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
@@ -313,22 +349,211 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
 		goto err_unpin;
-	workload->shadowed = true;
 	return 0;
 
 err_unpin:
 	engine->context_unpin(engine, shadow_ctx);
-err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
+	return ret;
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
+
+static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+	struct intel_gvt *gvt = workload->vgpu->gvt;
+	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
+	struct intel_vgpu_shadow_bb *bb;
+	int ret;
+
+	list_for_each_entry(bb, &workload->shadow_bb, list) {
+		bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
+		if (IS_ERR(bb->vma)) {
+			ret = PTR_ERR(bb->vma);
+			goto err;
+		}
+
+		/* relocate shadow batch buffer */
+		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
+		if (gmadr_bytes == 8)
+			bb->bb_start_cmd_va[2] = 0;
+
+		/* No one is going to touch shadow bb from now on. */
+		if (bb->clflush & CLFLUSH_AFTER) {
+			drm_clflush_virt_range(bb->va, bb->obj->base.size);
+			bb->clflush &= ~CLFLUSH_AFTER;
+		}
+
+		ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
+		if (ret)
+			goto err;
+
+		i915_gem_obj_finish_shmem_access(bb->obj);
+		bb->accessing = false;
+
+		i915_vma_move_to_active(bb->vma, workload->req, 0);
+	}
+	return 0;
+err:
+	release_shadow_batch_buffer(workload);
+	return ret;
+}
+
+static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+					struct intel_vgpu_workload,
+					wa_ctx);
+	int ring_id = workload->ring_id;
+	struct intel_vgpu_submission *s = &workload->vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct drm_i915_gem_object *ctx_obj =
+		shadow_ctx->engine[ring_id].state->obj;
+	struct execlist_ring_context *shadow_ring_context;
+	struct page *page;
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	shadow_ring_context = kmap_atomic(page);
+
+	shadow_ring_context->bb_per_ctx_ptr.val =
+		(shadow_ring_context->bb_per_ctx_ptr.val &
+		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
+	shadow_ring_context->rcs_indirect_ctx.val =
+		(shadow_ring_context->rcs_indirect_ctx.val &
+		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
+
+	kunmap_atomic(shadow_ring_context);
+	return 0;
+}
+
+static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	struct i915_vma *vma;
+	unsigned char *per_ctx_va =
+		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+		wa_ctx->indirect_ctx.size;
+
+	if (wa_ctx->indirect_ctx.size == 0)
+		return 0;
+
+	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+				       0, CACHELINE_BYTES, 0);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	/* FIXME: we are not tracking our pinned VMA, leaving it
+	 * up to the core to fix up the stray pin_count upon
+	 * free.
+	 */
+
+	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
+
+	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
+	memset(per_ctx_va, 0, CACHELINE_BYTES);
+
+	update_wa_ctx_2_shadow_ctx(wa_ctx);
+	return 0;
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_vgpu_shadow_bb *bb, *pos;
+
+	if (list_empty(&workload->shadow_bb))
+		return;
+
+	bb = list_first_entry(&workload->shadow_bb,
+			struct intel_vgpu_shadow_bb, list);
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+
+	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
+		if (bb->obj) {
+			if (bb->accessing)
+				i915_gem_obj_finish_shmem_access(bb->obj);
+
+			if (bb->va && !IS_ERR(bb->va))
+				i915_gem_object_unpin_map(bb->obj);
+
+			if (bb->vma && !IS_ERR(bb->vma)) {
+				i915_vma_unpin(bb->vma);
+				i915_vma_close(bb->vma);
+			}
+			__i915_gem_object_release_unless_active(bb->obj);
+		}
+		list_del(&bb->list);
+		kfree(bb);
+	}
+
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+static int prepare_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	int ret = 0;
+
+	ret = intel_vgpu_pin_mm(workload->shadow_mm);
+	if (ret) {
+		gvt_vgpu_err("fail to pin mm for vgpu\n");
+		return ret;
+	}
+
+	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+	if (ret) {
+		gvt_vgpu_err("fail to sync oos pages for vgpu\n");
+		goto err_unpin_mm;
+	}
+
+	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+	if (ret) {
+		gvt_vgpu_err("fail to flush post shadow\n");
+		goto err_unpin_mm;
+	}
+
+	ret = intel_gvt_generate_request(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to generate request\n");
+		goto err_unpin_mm;
+	}
+
+	ret = prepare_shadow_batch_buffer(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+		goto err_unpin_mm;
+	}
+
+	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+	if (ret) {
+		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+		goto err_shadow_batch;
+	}
+
+	if (workload->prepare) {
+		ret = workload->prepare(workload);
+		if (ret)
+			goto err_shadow_wa_ctx;
+	}
+
+	return 0;
+err_shadow_wa_ctx:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_shadow_batch:
+	release_shadow_batch_buffer(workload);
+err_unpin_mm:
+	intel_vgpu_unpin_mm(workload->shadow_mm);
 	return ret;
 }
 
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
-	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
 	int ret = 0;
 
@@ -341,12 +566,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	if (ret)
 		goto out;
 
-	if (workload->prepare) {
-		ret = workload->prepare(workload);
-		if (ret) {
-			engine->context_unpin(engine, shadow_ctx);
-			goto out;
-		}
+	ret = prepare_workload(workload);
+	if (ret) {
+		engine->context_unpin(engine, shadow_ctx);
+		goto out;
 	}
 
 out:
@@ -414,7 +637,7 @@ static struct intel_vgpu_workload *pick_next_workload(
 
 	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
 
-	atomic_inc(&workload->vgpu->running_workload_num);
+	atomic_inc(&workload->vgpu->submission.running_workload_num);
 out:
 	mutex_unlock(&gvt->lock);
 	return workload;
@@ -424,8 +647,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
 		shadow_ctx->engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
@@ -449,7 +673,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	while (i < context_page_num) {
 		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
 				(u32)((workload->ctx_desc.lrca + i) <<
-					GTT_PAGE_SHIFT));
+					I915_GTT_PAGE_SHIFT));
 		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
 			gvt_vgpu_err("invalid guest context descriptor\n");
 			return;
@@ -458,7 +682,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
 		src = kmap(page);
 		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
-				GTT_PAGE_SIZE);
+				I915_GTT_PAGE_SIZE);
 		kunmap(page);
 		i++;
 	}
@@ -483,23 +707,41 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 			sizeof(*shadow_ring_context),
 			(void *)shadow_ring_context +
 			sizeof(*shadow_ring_context),
-			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
 	kunmap(page);
 }
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine;
+	struct intel_vgpu_workload *pos, *n;
+	unsigned int tmp;
+
+	/* free the unsubmitted workloads in the queues. */
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		list_for_each_entry_safe(pos, n,
+			&s->workload_q_head[engine->id], list) {
+			list_del_init(&pos->list);
+			intel_vgpu_destroy_workload(pos);
+		}
+		clear_bit(engine->id, s->shadow_ctx_desc_updated);
+	}
+}
+
 static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	struct intel_vgpu_workload *workload;
-	struct intel_vgpu *vgpu;
+	struct intel_vgpu_workload *workload =
+		scheduler->current_workload[ring_id];
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	int event;
 
 	mutex_lock(&gvt->lock);
 
-	workload = scheduler->current_workload[ring_id];
-	vgpu = workload->vgpu;
-
 	/* For the workload w/ request, needs to wait for the context
 	 * switch to make sure request is completed.
 	 * For the workload w/o request, directly complete the workload.
@@ -536,7 +778,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		}
 		mutex_lock(&dev_priv->drm.struct_mutex);
 		/* unpin shadow ctx as the shadow_ctx update is done */
-		engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+		engine->context_unpin(engine, s->shadow_ctx);
 		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}
 
@@ -546,9 +788,32 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	scheduler->current_workload[ring_id] = NULL;
 
 	list_del_init(&workload->list);
+
+	if (!workload->status) {
+		release_shadow_batch_buffer(workload);
+		release_shadow_wa_ctx(&workload->wa_ctx);
+	}
+
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+		/* If workload->status is not successful, the HW GPU hit
+		 * a hang or something went wrong with i915/GVT, and GVT
+		 * won't inject a context switch interrupt to the guest.
+		 * To the guest, this error is effectively a vGPU hang, so
+		 * we should emulate one. If there are pending workloads
+		 * already submitted from the guest, clean them up the way
+		 * the HW GPU would.
+		 *
+		 * If we are in the middle of an engine reset, the pending
+		 * workloads won't be submitted to the HW GPU and will be
+		 * cleaned up later during the reset, so doing the workload
+		 * cleanup here has no impact.
+		 */
+		clean_workloads(vgpu, ENGINE_MASK(ring_id));
+	}
+
 	workload->complete(workload);
 
-	atomic_dec(&vgpu->running_workload_num);
+	atomic_dec(&s->running_workload_num);
 	wake_up(&scheduler->workload_complete_wq);
 
 	if (gvt->scheduler.need_reschedule)
@@ -631,20 +896,23 @@ complete:
 					FORCEWAKE_ALL);
 
 		intel_runtime_pm_put(gvt->dev_priv);
+		if (ret && (vgpu_is_vm_unhealthy(ret)))
+			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 	}
 	return 0;
 }
 
 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 {
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 
-	if (atomic_read(&vgpu->running_workload_num)) {
+	if (atomic_read(&s->running_workload_num)) {
 		gvt_dbg_sched("wait vgpu idle\n");
 
 		wait_event(scheduler->workload_complete_wq,
-				!atomic_read(&vgpu->running_workload_num));
+				!atomic_read(&s->running_workload_num));
 	}
 }
 
@@ -709,21 +977,354 @@ err:
 	return ret;
 }
 
-void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
+/**
+ * intel_vgpu_clean_submission - free submission-related resources for vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called when a vGPU is being destroyed.
+ *
+ */
+void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
-	i915_gem_context_put(vgpu->shadow_ctx);
+	struct intel_vgpu_submission *s = &vgpu->submission;
+
+	intel_vgpu_select_submission_ops(vgpu, 0);
+	i915_gem_context_put(s->shadow_ctx);
+	kmem_cache_destroy(s->workloads);
+}
+
+/**
+ * intel_vgpu_reset_submission - reset submission-related resources for vGPU
+ * @vgpu: a vGPU
+ * @engine_mask: engines expected to be reset
+ *
+ * This function is called when a vGPU is being reset.
+ *
+ */
+void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
+		unsigned long engine_mask)
+{
+	struct intel_vgpu_submission *s = &vgpu->submission;
+
+	if (!s->active)
+		return;
+
+	clean_workloads(vgpu, engine_mask);
+	s->ops->reset(vgpu, engine_mask);
 }
 
-int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
+/**
+ * intel_vgpu_setup_submission - set up submission-related resources for vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called when a vGPU is being created.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 {
-	atomic_set(&vgpu->running_workload_num, 0);
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
+	int ret;
 
-	vgpu->shadow_ctx = i915_gem_context_create_gvt(
+	s->shadow_ctx = i915_gem_context_create_gvt(
 			&vgpu->gvt->dev_priv->drm);
-	if (IS_ERR(vgpu->shadow_ctx))
-		return PTR_ERR(vgpu->shadow_ctx);
+	if (IS_ERR(s->shadow_ctx))
+		return PTR_ERR(s->shadow_ctx);
+
+	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+
+	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
+			sizeof(struct intel_vgpu_workload), 0,
+			SLAB_HWCACHE_ALIGN,
+			NULL);
+
+	if (!s->workloads) {
+		ret = -ENOMEM;
+		goto out_shadow_ctx;
+	}
+
+	for_each_engine(engine, vgpu->gvt->dev_priv, i)
+		INIT_LIST_HEAD(&s->workload_q_head[i]);
 
-	bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+	atomic_set(&s->running_workload_num, 0);
+	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 
 	return 0;
+
+out_shadow_ctx:
+	i915_gem_context_put(s->shadow_ctx);
+	return ret;
+}
+
+/**
+ * intel_vgpu_select_submission_ops - select virtual submission interface
+ * @vgpu: a vGPU
+ * @interface: expected vGPU virtual submission interface
+ *
+ * This function is called when guest configures submission interface.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned int interface)
+{
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	const struct intel_vgpu_submission_ops *ops[] = {
+		[INTEL_VGPU_EXECLIST_SUBMISSION] =
+			&intel_vgpu_execlist_submission_ops,
+	};
+	int ret;
+
+	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+		return -EINVAL;
+
+	if (s->active) {
+		s->ops->clean(vgpu);
+		s->active = false;
+		gvt_dbg_core("vgpu%d: de-select ops [ %s ]\n",
+				vgpu->id, s->ops->name);
+	}
+
+	if (interface == 0) {
+		s->ops = NULL;
+		s->virtual_submission_interface = 0;
+		gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+		return 0;
+	}
+
+	ret = ops[interface]->init(vgpu);
+	if (ret)
+		return ret;
+
+	s->ops = ops[interface];
+	s->virtual_submission_interface = interface;
+	s->active = true;
+
+	gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
+			vgpu->id, s->ops->name);
+
+	return 0;
+}
+
+/**
+ * intel_vgpu_destroy_workload - destroy a vGPU workload
+ * @workload: the workload to destroy
+ *
+ * This function is called when destroying a vGPU workload.
+ *
+ */
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu_submission *s = &workload->vgpu->submission;
+
+	if (workload->shadow_mm)
+		intel_gvt_mm_unreference(workload->shadow_mm);
+
+	kmem_cache_free(s->workloads, workload);
+}
+
+static struct intel_vgpu_workload *
+alloc_workload(struct intel_vgpu *vgpu)
+{
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_workload *workload;
+
+	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
+	if (!workload)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&workload->list);
+	INIT_LIST_HEAD(&workload->shadow_bb);
+
+	init_waitqueue_head(&workload->shadow_ctx_status_wq);
+	atomic_set(&workload->shadow_ctx_active, 0);
+
+	workload->status = -EINPROGRESS;
+	workload->shadowed = false;
+	workload->vgpu = vgpu;
+
+	return workload;
+}
+
+#define RING_CTX_OFF(x) \
+	offsetof(struct execlist_ring_context, x)
+
+static void read_guest_pdps(struct intel_vgpu *vgpu,
+		u64 ring_context_gpa, u32 pdp[8])
+{
+	u64 gpa;
+	int i;
+
+	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+
+	for (i = 0; i < 8; i++)
+		intel_gvt_hypervisor_read_gpa(vgpu,
+				gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static int prepare_mm(struct intel_vgpu_workload *workload)
+{
+	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
+	struct intel_vgpu_mm *mm;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	int page_table_level;
+	u32 pdp[8];
+
+	if (desc->addressing_mode == 1) { /* legacy 32-bit */
+		page_table_level = 3;
+	} else if (desc->addressing_mode == 3) { /* legacy 64-bit */
+		page_table_level = 4;
+	} else {
+		gvt_vgpu_err("Advanced Context mode (SVM) is not supported!\n");
+		return -EINVAL;
+	}
+
+	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+
+	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
+	if (mm) {
+		intel_gvt_mm_reference(mm);
+	} else {
+		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
+				pdp, page_table_level, 0);
+		if (IS_ERR(mm)) {
+			gvt_vgpu_err("fail to create mm object.\n");
+			return PTR_ERR(mm);
+		}
+	}
+	workload->shadow_mm = mm;
+	return 0;
+}
+
+#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
+		((a)->lrca == (b)->lrca))
+
+#define get_last_workload(q) \
+	(list_empty(q) ? NULL : container_of(q->prev, \
+	struct intel_vgpu_workload, list))
+/**
+ * intel_vgpu_create_workload - create a vGPU workload
+ * @vgpu: a vGPU
+ * @ring_id: ring ID of the workload
+ * @desc: a guest context descriptor
+ *
+ * This function is called when creating a vGPU workload.
+ *
+ * Returns:
+ * struct intel_vgpu_workload * on success, or a negative error code
+ * wrapped in an ERR_PTR on failure.
+ *
+ */
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+			   struct execlist_ctx_descriptor_format *desc)
+{
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct list_head *q = workload_q_head(vgpu, ring_id);
+	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct intel_vgpu_workload *workload = NULL;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	u64 ring_context_gpa;
+	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+	int ret;
+
+	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+			(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
+	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
+		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
+		return ERR_PTR(-EINVAL);
+	}
+
+	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(ring_header.val), &head, 4);
+
+	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(ring_tail.val), &tail, 4);
+
+	head &= RB_HEAD_OFF_MASK;
+	tail &= RB_TAIL_OFF_MASK;
+
+	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
+		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
+		gvt_dbg_el("ctx head %x real head %lx\n", head,
+				last_workload->rb_tail);
+		/*
+		 * cannot use guest context head pointer here,
+		 * as it might not be updated at this time
+		 */
+		head = last_workload->rb_tail;
+	}
+
+	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+
+	/* record some ring buffer register values for scan and shadow */
+	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(rb_start.val), &start, 4);
+	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
+	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+
+	workload = alloc_workload(vgpu);
+	if (IS_ERR(workload))
+		return workload;
+
+	workload->ring_id = ring_id;
+	workload->ctx_desc = *desc;
+	workload->ring_context_gpa = ring_context_gpa;
+	workload->rb_head = head;
+	workload->rb_tail = tail;
+	workload->rb_start = start;
+	workload->rb_ctl = ctl;
+
+	if (ring_id == RCS) {
+		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
+		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
+
+		workload->wa_ctx.indirect_ctx.guest_gma =
+			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
+		workload->wa_ctx.indirect_ctx.size =
+			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
+			CACHELINE_BYTES;
+		workload->wa_ctx.per_ctx.guest_gma =
+			per_ctx & PER_CTX_ADDR_MASK;
+		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+	}
+
+	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
+			workload, ring_id, head, tail, start, ctl);
+
+	ret = prepare_mm(workload);
+	if (ret) {
+		kmem_cache_free(s->workloads, workload);
+		return ERR_PTR(ret);
+	}
+
+	/* Only scan and shadow the first workload in the queue
+	 * as there is only one pre-allocated buf-obj for shadow.
+	 */
+	if (list_empty(workload_q_head(vgpu, ring_id))) {
+		intel_runtime_pm_get(dev_priv);
+		mutex_lock(&dev_priv->drm.struct_mutex);
+		ret = intel_gvt_scan_and_shadow_workload(workload);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
+		intel_runtime_pm_put(dev_priv);
+	}
+
+	if (ret && (vgpu_is_vm_unhealthy(ret))) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+		intel_vgpu_destroy_workload(workload);
+		return ERR_PTR(ret);
+	}
+
+	return workload;
 }
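
Note: intel_vgpu_select_submission_ops() above carries the refactor's main idea — per-vGPU submission state points at an ops table, so a future submission interface only needs a new ops[] entry. A condensed sketch of the selection pattern, with simplified stand-in types rather than the kernel structures:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct submission_ops {
		const char *name;
		int (*init)(void);
		void (*clean)(void);
	};

	static int execlist_init(void) { puts("execlist init"); return 0; }
	static void execlist_clean(void) { puts("execlist clean"); }

	static const struct submission_ops execlist_ops = {
		.name	= "execlist",
		.init	= execlist_init,
		.clean	= execlist_clean,
	};

	struct submission {
		const struct submission_ops *ops;
		bool active;
	};

	static int select_ops(struct submission *s, const struct submission_ops *ops)
	{
		if (s->active) {		/* de-select the current interface */
			s->ops->clean();
			s->active = false;
		}
		if (!ops) {			/* interface 0: no submission ops */
			s->ops = NULL;
			return 0;
		}
		if (ops->init())		/* activate the new interface */
			return -1;
		s->ops = ops;
		s->active = true;
		printf("activated ops [ %s ]\n", s->ops->name);
		return 0;
	}

	int main(void)
	{
		struct submission s = { NULL, false };

		select_ops(&s, &execlist_ops);	/* guest enables execlist submission */
		select_ops(&s, NULL);		/* reset back to no interface */
		return 0;
	}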

+ 22 - 7
drivers/gpu/drm/i915/gvt/scheduler.h

@@ -112,17 +112,18 @@ struct intel_vgpu_workload {
 	struct intel_shadow_wa_ctx wa_ctx;
 };
 
-/* Intel shadow batch buffer is a i915 gem object */
-struct intel_shadow_bb_entry {
+struct intel_vgpu_shadow_bb {
 	struct list_head list;
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	void *va;
-	unsigned long len;
 	u32 *bb_start_cmd_va;
+	unsigned int clflush;
+	bool accessing;
 };
 
 #define workload_q_head(vgpu, ring_id) \
-	(&(vgpu->workload_q_head[ring_id]))
+	(&(vgpu->submission.workload_q_head[ring_id]))
 
 #define queue_workload(workload) do { \
 	list_add_tail(&workload->list, \
@@ -137,9 +138,23 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
 
 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
 
-int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
+int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
 
-void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
+				 unsigned long engine_mask);
+
+void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
+
+int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned int interface);
+
+extern const struct intel_vgpu_submission_ops
+intel_vgpu_execlist_submission_ops;
+
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+			   struct execlist_ctx_descriptor_format *desc);
+
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
-void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
 #endif

+ 18 - 16
drivers/gpu/drm/i915/gvt/vgpu.c

@@ -43,7 +43,10 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
 	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
 	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+
 	vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+	vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+
 	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
 		vgpu_aperture_gmadr_base(vgpu);
 	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
@@ -226,7 +229,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 
 	vgpu->active = false;
 
-	if (atomic_read(&vgpu->running_workload_num)) {
+	if (atomic_read(&vgpu->submission.running_workload_num)) {
 		mutex_unlock(&gvt->lock);
 		intel_gvt_wait_vgpu_idle(vgpu);
 		mutex_lock(&gvt->lock);
@@ -252,10 +255,10 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 
 	WARN(vgpu->active, "vGPU is still active!\n");
 
+	intel_gvt_debugfs_remove_vgpu(vgpu);
 	idr_remove(&gvt->vgpu_idr, vgpu->id);
 	intel_vgpu_clean_sched_policy(vgpu);
-	intel_vgpu_clean_gvt_context(vgpu);
-	intel_vgpu_clean_execlist(vgpu);
+	intel_vgpu_clean_submission(vgpu);
 	intel_vgpu_clean_display(vgpu);
 	intel_vgpu_clean_opregion(vgpu);
 	intel_vgpu_clean_gtt(vgpu);
@@ -293,7 +296,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
 	vgpu->gvt = gvt;
 
 	for (i = 0; i < I915_NUM_ENGINES; i++)
-		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
 
 	ret = intel_vgpu_init_sched_policy(vgpu);
 	if (ret)
@@ -346,7 +349,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	vgpu->handle = param->handle;
 	vgpu->gvt = gvt;
 	vgpu->sched_ctl.weight = param->weight;
-	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
 
 	intel_vgpu_init_cfg_space(vgpu, param->primary);
 
@@ -372,26 +374,26 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_gtt;
 
-	ret = intel_vgpu_init_execlist(vgpu);
+	ret = intel_vgpu_setup_submission(vgpu);
 	if (ret)
 		goto out_clean_display;
 
-	ret = intel_vgpu_init_gvt_context(vgpu);
+	ret = intel_vgpu_init_sched_policy(vgpu);
 	if (ret)
-		goto out_clean_execlist;
+		goto out_clean_submission;
 
-	ret = intel_vgpu_init_sched_policy(vgpu);
+	ret = intel_gvt_debugfs_add_vgpu(vgpu);
 	if (ret)
-		goto out_clean_shadow_ctx;
+		goto out_clean_sched_policy;
 
 	mutex_unlock(&gvt->lock);
 
 	return vgpu;
 
-out_clean_shadow_ctx:
-	intel_vgpu_clean_gvt_context(vgpu);
-out_clean_execlist:
-	intel_vgpu_clean_execlist(vgpu);
+out_clean_sched_policy:
+	intel_vgpu_clean_sched_policy(vgpu);
+out_clean_submission:
+	intel_vgpu_clean_submission(vgpu);
 out_clean_display:
 	intel_vgpu_clean_display(vgpu);
 out_clean_gtt:
@@ -500,10 +502,10 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		mutex_lock(&gvt->lock);
 	}
 
-	intel_vgpu_reset_execlist(vgpu, resetting_eng);
-
+	intel_vgpu_reset_submission(vgpu, resetting_eng);
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
+		intel_vgpu_select_submission_ops(vgpu, 0);
 
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {