
Merge tag 'gvt-next-2018-03-08' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2018-03-08

- big refactor for shadow ppgtt (Changbin)
- KBL context save/restore via LRI cmd (Weinan)
- misc smatch fixes (Zhenyu)
- Properly unmap dma for guest page (Changbin)
- other misc fixes (Xiong, etc.)

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308023152.oi4ialn5uxetbruf@zhen-hp.sh.intel.com
Joonas Lahtinen · parent commit ed5c94a85b

+ 1 - 1
drivers/gpu/drm/i915/gvt/Makefile

@@ -3,7 +3,7 @@ GVT_DIR := gvt
 GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
 	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
 	execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
-	fb_decoder.o dmabuf.o
+	fb_decoder.o dmabuf.o page_track.o
 
 ccflags-y				+= -I$(src) -I$(src)/$(GVT_DIR)
 i915-y					+= $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
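
The new page_track.o takes over the guest-page write-protection bookkeeping that this commit deletes from gtt.c below (intel_vgpu_init_page_track(), intel_vgpu_clean_page_track(), the tracked-page hash table, and friends). The new file itself is not shown on this page; inferred from the call sites in the gtt.c hunks, its interface looks roughly like the following sketch. The function names are taken verbatim from the diff, but the field layout and exact signatures are assumptions:

	struct intel_vgpu_page_track {
		unsigned long gfn;
		bool tracked;
		/* invoked when the tracked guest page is written */
		int (*handler)(struct intel_vgpu_page_track *page_track,
			       u64 gpa, void *data, int bytes);
		void *priv_data;	/* e.g. the owning shadow page table */
	};

	int intel_vgpu_register_page_track(struct intel_vgpu *vgpu,
			unsigned long gfn,
			int (*handler)(struct intel_vgpu_page_track *,
				       u64, void *, int),
			void *priv_data);
	void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
			unsigned long gfn);
	struct intel_vgpu_page_track *intel_vgpu_find_page_track(
			struct intel_vgpu *vgpu, unsigned long gfn);
	int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu,
			unsigned long gfn);
	int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu,
			unsigned long gfn);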

+ 1 - 1
drivers/gpu/drm/i915/gvt/dmabuf.c

@@ -459,7 +459,7 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 
 	obj = vgpu_create_gem(dev, dmabuf_obj->info);
 	if (obj == NULL) {
-		gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
+		gvt_vgpu_err("create gvt gem obj failed\n");
 		ret = -ENOMEM;
 		goto out;
 	}
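
The vgpu->id argument is dropped here because gvt_vgpu_err() already tags every message with the originating vgpu, so the trailing ":%d" printed the id twice; one of the small cleanups riding along with this pull.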

+ 658 - 799
drivers/gpu/drm/i915/gvt/gtt.c

@@ -38,6 +38,12 @@
 #include "i915_pvinfo.h"
 #include "trace.h"
 
+#if defined(VERBOSE_DEBUG)
+#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
+#else
+#define gvt_vdbg_mm(fmt, args...)
+#endif
+
 static bool enable_out_of_sync = false;
 static int preallocated_oos_pages = 8192;
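
The new gvt_vdbg_mm() compiles to nothing unless VERBOSE_DEBUG is defined, so the per-entry tracing added throughout this file costs nothing in normal builds (the format arguments are not even evaluated). A standalone illustration of the pattern (userspace GNU C; vdbg is a made-up name for this demo):

	#include <stdio.h>

	#define VERBOSE_DEBUG	/* comment out to compile the logging away */

	#if defined(VERBOSE_DEBUG)
	#define vdbg(fmt, args...) fprintf(stderr, "vdbg: " fmt, ##args)
	#else
	#define vdbg(fmt, args...)
	#endif

	int main(void)
	{
		vdbg("entry type %d, value %llx\n", 1, 0x1234ULL);
		return 0;
	}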
 
@@ -264,7 +270,7 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
 	return readq(addr);
 }
 
-static void gtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct drm_i915_private *dev_priv)
 {
 	mmio_hw_access_pre(dev_priv);
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
@@ -331,20 +337,20 @@ static inline int gtt_set_entry64(void *pt,
 
 #define GTT_HAW 46
 
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
+#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
+#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
+#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)
 
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {
 	unsigned long pfn;
 
 	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
-		pfn = (e->val64 & ADDR_1G_MASK) >> 12;
+		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
 	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
-		pfn = (e->val64 & ADDR_2M_MASK) >> 12;
+		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
 	else
-		pfn = (e->val64 & ADDR_4K_MASK) >> 12;
+		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
 	return pfn;
 }
 
@@ -352,16 +358,16 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
 {
 	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
 		e->val64 &= ~ADDR_1G_MASK;
-		pfn &= (ADDR_1G_MASK >> 12);
+		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
 	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
 		e->val64 &= ~ADDR_2M_MASK;
-		pfn &= (ADDR_2M_MASK >> 12);
+		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
 	} else {
 		e->val64 &= ~ADDR_4K_MASK;
-		pfn &= (ADDR_4K_MASK >> 12);
+		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
 	}
 
-	e->val64 |= (pfn << 12);
+	e->val64 |= (pfn << PAGE_SHIFT);
 }
 
 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
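
GENMASK_ULL(h, l) builds the mask with bits h..l set, which is easier to audit than the open-coded shift-and-subtract form it replaces. A small userspace check of the equivalence for the 1G field (the GENMASK_ULL definition below is a simplified stand-in for the one in linux/bits.h):

	#include <assert.h>
	#include <stdint.h>

	/* valid for 0 <= l <= h <= 63 */
	#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
	#define GTT_HAW 46

	int main(void)
	{
		/* the old open-coded mask for the 1G address field */
		uint64_t old_mask = ((1ULL << (GTT_HAW - 30)) - 1) << 30;

		/* bits 45..30, identical */
		assert(GENMASK_ULL(GTT_HAW - 1, 30) == old_mask);
		return 0;
	}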
@@ -371,7 +377,7 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
 		return false;
 
 	e->type = get_entry_type(e->type);
-	if (!(e->val64 & BIT(7)))
+	if (!(e->val64 & _PAGE_PSE))
 		return false;
 
 	e->type = get_pse_type(e->type);
@@ -389,17 +395,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
 			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
 		return (e->val64 != 0);
 	else
-		return (e->val64 & BIT(0));
+		return (e->val64 & _PAGE_PRESENT);
 }
 
 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
 {
-	e->val64 &= ~BIT(0);
+	e->val64 &= ~_PAGE_PRESENT;
 }
 
 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
 {
-	e->val64 |= BIT(0);
+	e->val64 |= _PAGE_PRESENT;
 }
 
 /*
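
Same spirit as the GENMASK change above: the raw BIT(0) and BIT(7) tests are replaced by the architectural names _PAGE_PRESENT and _PAGE_PSE (bits 0 and 7 of an x86-style page-table entry), a layout the gen8 PPGTT entries share for these flags.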
@@ -447,58 +453,91 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
 	.gma_to_pml4_index = gen8_gma_to_pml4_index,
 };
 
-static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
-		struct intel_gvt_gtt_entry *m)
+/*
+ * MM helpers.
+ */
+static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index,
+		bool guest)
 {
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
-	unsigned long gfn, mfn;
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 
-	*m = *p;
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
 
-	if (!ops->test_present(p))
-		return 0;
+	entry->type = mm->ppgtt_mm.root_entry_type;
+	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
+			   mm->ppgtt_mm.shadow_pdps,
+			   entry, index, false, 0, mm->vgpu);
 
-	gfn = ops->get_pfn(p);
+	pte_ops->test_pse(entry);
+}
 
-	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
-	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
-		return -ENXIO;
-	}
+static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	_ppgtt_get_root_entry(mm, entry, index, true);
+}
 
-	ops->set_pfn(m, mfn);
-	return 0;
+static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	_ppgtt_get_root_entry(mm, entry, index, false);
 }
 
-/*
- * MM helpers.
- */
-int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
-		void *page_table, struct intel_gvt_gtt_entry *e,
-		unsigned long index)
+static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index,
+		bool guest)
 {
-	struct intel_gvt *gvt = mm->vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
-	int ret;
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 
-	e->type = mm->page_table_entry_type;
+	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
+			   mm->ppgtt_mm.shadow_pdps,
+			   entry, index, false, 0, mm->vgpu);
+}
 
-	ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
-	if (ret)
-		return ret;
+static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	_ppgtt_set_root_entry(mm, entry, index, true);
+}
 
-	ops->test_pse(e);
-	return 0;
+static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	_ppgtt_set_root_entry(mm, entry, index, false);
 }
 
-int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
-		void *page_table, struct intel_gvt_gtt_entry *e,
-		unsigned long index)
+static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
-	struct intel_gvt *gvt = mm->vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+	entry->type = GTT_TYPE_GGTT_PTE;
+	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
+			   false, 0, mm->vgpu);
+}
+
+static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
+			   false, 0, mm->vgpu);
+}
+
+static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
 
-	return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
+	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
 }
 
 /*
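
These helpers replace the generic intel_vgpu_mm_get_entry()/intel_vgpu_mm_set_entry() pair (deleted in this hunk), which took a raw page_table pointer. Root entries of a shadowed PPGTT now live in the mm's own guest_pdps[]/shadow_pdps[] arrays, GGTT guest entries go through the vGPU's virtual GGTT copy, and ggtt_set_host_entry() passes a NULL page table, which the pte ops appear to treat as "access the physical GGTT" (compare the read_pte64() MMIO helper above).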
@@ -520,12 +559,15 @@ static inline int ppgtt_spt_get_entry(
 		return -EINVAL;
 
 	ret = ops->get_entry(page_table, e, index, guest,
-			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
+			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
 			spt->vgpu);
 	if (ret)
 		return ret;
 
 	ops->test_pse(e);
+
+	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
+		    type, e->type, index, e->val64);
 	return 0;
 }
 
@@ -541,18 +583,21 @@ static inline int ppgtt_spt_set_entry(
 	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
 		return -EINVAL;
 
+	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
+		    type, e->type, index, e->val64);
+
 	return ops->set_entry(page_table, e, index, guest,
-			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
+			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
 			spt->vgpu);
 }
 
 #define ppgtt_get_guest_entry(spt, e, index) \
 	ppgtt_spt_get_entry(spt, NULL, \
-		spt->guest_page_type, e, index, true)
+		spt->guest_page.type, e, index, true)
 
 #define ppgtt_set_guest_entry(spt, e, index) \
 	ppgtt_spt_set_entry(spt, NULL, \
-		spt->guest_page_type, e, index, true)
+		spt->guest_page.type, e, index, true)
 
 #define ppgtt_get_shadow_entry(spt, e, index) \
 	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
@@ -562,159 +607,6 @@ static inline int ppgtt_spt_set_entry(
 	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
 		spt->shadow_page.type, e, index, false)
 
-/**
- * intel_vgpu_init_page_track - init a page track data structure
- * @vgpu: a vGPU
- * @t: a page track data structure
- * @gfn: guest memory page frame number
- * @handler: the function will be called when target guest memory page has
- * been modified.
- *
- * This function is called when a user wants to prepare a page track data
- * structure to track a guest memory page.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
-		struct intel_vgpu_page_track *t,
-		unsigned long gfn,
-		int (*handler)(void *, u64, void *, int),
-		void *data)
-{
-	INIT_HLIST_NODE(&t->node);
-
-	t->tracked = false;
-	t->gfn = gfn;
-	t->handler = handler;
-	t->data = data;
-
-	hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
-	return 0;
-}
-
-/**
- * intel_vgpu_clean_page_track - release a page track data structure
- * @vgpu: a vGPU
- * @t: a page track data structure
- *
- * This function is called before a user frees a page track data structure.
- */
-void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
-		struct intel_vgpu_page_track *t)
-{
-	if (!hlist_unhashed(&t->node))
-		hash_del(&t->node);
-
-	if (t->tracked)
-		intel_gvt_hypervisor_disable_page_track(vgpu, t);
-}
-
-/**
- * intel_vgpu_find_tracked_page - find a tracked guest page
- * @vgpu: a vGPU
- * @gfn: guest memory page frame number
- *
- * This function is called when the emulation layer wants to figure out if a
- * trapped GFN is a tracked guest page.
- *
- * Returns:
- * Pointer to page track data structure, NULL if not found.
- */
-struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
-		struct intel_vgpu *vgpu, unsigned long gfn)
-{
-	struct intel_vgpu_page_track *t;
-
-	hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
-			t, node, gfn) {
-		if (t->gfn == gfn)
-			return t;
-	}
-	return NULL;
-}
-
-static int init_guest_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *p,
-		unsigned long gfn,
-		int (*handler)(void *, u64, void *, int),
-		void *data)
-{
-	p->oos_page = NULL;
-	p->write_cnt = 0;
-
-	return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
-}
-
-static int detach_oos_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_oos_page *oos_page);
-
-static void clean_guest_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *p)
-{
-	if (p->oos_page)
-		detach_oos_page(vgpu, p->oos_page);
-
-	intel_vgpu_clean_page_track(vgpu, &p->track);
-}
-
-static inline int init_shadow_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_shadow_page *p, int type, bool hash)
-{
-	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-	dma_addr_t daddr;
-
-	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(kdev, daddr)) {
-		gvt_vgpu_err("fail to map dma addr\n");
-		return -EINVAL;
-	}
-
-	p->vaddr = page_address(p->page);
-	p->type = type;
-
-	INIT_HLIST_NODE(&p->node);
-
-	p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
-	if (hash)
-		hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
-	return 0;
-}
-
-static inline void clean_shadow_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_shadow_page *p)
-{
-	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-
-	dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
-			PCI_DMA_BIDIRECTIONAL);
-
-	if (!hlist_unhashed(&p->node))
-		hash_del(&p->node);
-}
-
-static inline struct intel_vgpu_shadow_page *find_shadow_page(
-		struct intel_vgpu *vgpu, unsigned long mfn)
-{
-	struct intel_vgpu_shadow_page *p;
-
-	hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
-		p, node, mfn) {
-		if (p->mfn == mfn)
-			return p;
-	}
-	return NULL;
-}
-
-#define page_track_to_guest_page(ptr) \
-	container_of(ptr, struct intel_vgpu_guest_page, track)
-
-#define guest_page_to_ppgtt_spt(ptr) \
-	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
-
-#define shadow_page_to_ppgtt_spt(ptr) \
-	container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
-
 static void *alloc_spt(gfp_t gfp_mask)
 {
 	struct intel_vgpu_ppgtt_spt *spt;
@@ -737,63 +629,96 @@ static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
 	kfree(spt);
 }
 
-static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static int detach_oos_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_oos_page *oos_page);
+
+static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
-	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
+	struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
 
-	clean_shadow_page(spt->vgpu, &spt->shadow_page);
-	clean_guest_page(spt->vgpu, &spt->guest_page);
-	list_del_init(&spt->post_shadow_list);
+	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
+
+	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
+		       PCI_DMA_BIDIRECTIONAL);
 
+	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
+
+	if (spt->guest_page.oos_page)
+		detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
+
+	intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+
+	list_del_init(&spt->post_shadow_list);
 	free_spt(spt);
 }
 
-static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
+static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
 {
-	struct hlist_node *n;
-	struct intel_vgpu_shadow_page *sp;
-	int i;
+	struct intel_vgpu_ppgtt_spt *spt;
+	struct radix_tree_iter iter;
+	void **slot;
 
-	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
-		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
+	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
+		spt = radix_tree_deref_slot(slot);
+		ppgtt_free_spt(spt);
+	}
 }
 
 static int ppgtt_handle_guest_write_page_table_bytes(
-		struct intel_vgpu_guest_page *gpt,
+		struct intel_vgpu_ppgtt_spt *spt,
 		u64 pa, void *p_data, int bytes);
 
-static int ppgtt_write_protection_handler(void *data, u64 pa,
-		void *p_data, int bytes)
+static int ppgtt_write_protection_handler(
+		struct intel_vgpu_page_track *page_track,
+		u64 gpa, void *data, int bytes)
 {
-	struct intel_vgpu_page_track *t = data;
-	struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
+	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
+
 	int ret;
 
 	if (bytes != 4 && bytes != 8)
 		return -EINVAL;
 
-	if (!t->tracked)
-		return -EINVAL;
-
-	ret = ppgtt_handle_guest_write_page_table_bytes(p,
-		pa, p_data, bytes);
+	ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
 	if (ret)
 		return ret;
 	return ret;
 }
 
-static int reclaim_one_mm(struct intel_gvt *gvt);
+/* Find a spt by guest gfn. */
+static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
+		struct intel_vgpu *vgpu, unsigned long gfn)
+{
+	struct intel_vgpu_page_track *track;
+
+	track = intel_vgpu_find_page_track(vgpu, gfn);
+	if (track && track->handler == ppgtt_write_protection_handler)
+		return track->priv_data;
 
-static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
+	return NULL;
+}
+
+/* Find the spt by shadow page mfn. */
+static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
+		struct intel_vgpu *vgpu, unsigned long mfn)
+{
+	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
+}
+
+static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
 		struct intel_vgpu *vgpu, int type, unsigned long gfn)
 {
+	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 	struct intel_vgpu_ppgtt_spt *spt = NULL;
+	dma_addr_t daddr;
 	int ret;
 
 retry:
 	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
 	if (!spt) {
-		if (reclaim_one_mm(vgpu->gvt))
+		if (reclaim_one_ppgtt_mm(vgpu->gvt))
 			goto retry;
 
 		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
@@ -801,44 +726,48 @@ retry:
 	}
 
 	spt->vgpu = vgpu;
-	spt->guest_page_type = type;
 	atomic_set(&spt->refcount, 1);
 	INIT_LIST_HEAD(&spt->post_shadow_list);
 
 	/*
-	 * TODO: guest page type may be different with shadow page type,
-	 *	 when we support PSE page in future.
+	 * Init shadow_page.
 	 */
-	ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
-	if (ret) {
-		gvt_vgpu_err("fail to initialize shadow page for spt\n");
-		goto err;
+	spt->shadow_page.type = type;
+	daddr = dma_map_page(kdev, spt->shadow_page.page,
+			     0, 4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(kdev, daddr)) {
+		gvt_vgpu_err("fail to map dma addr\n");
+		ret = -EINVAL;
+		goto err_free_spt;
 	}
+	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
+	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
 
-	ret = init_guest_page(vgpu, &spt->guest_page,
-			gfn, ppgtt_write_protection_handler, NULL);
-	if (ret) {
-		gvt_vgpu_err("fail to initialize guest page for spt\n");
-		goto err;
-	}
+	/*
+	 * Init guest_page.
+	 */
+	spt->guest_page.type = type;
+	spt->guest_page.gfn = gfn;
 
-	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
-	return spt;
-err:
-	ppgtt_free_shadow_page(spt);
-	return ERR_PTR(ret);
-}
+	ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
+					ppgtt_write_protection_handler, spt);
+	if (ret)
+		goto err_unmap_dma;
 
-static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
-		struct intel_vgpu *vgpu, unsigned long mfn)
-{
-	struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
+	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
+	if (ret)
+		goto err_unreg_page_track;
 
-	if (p)
-		return shadow_page_to_ppgtt_spt(p);
+	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+	return spt;
 
-	gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
-	return NULL;
+err_unreg_page_track:
+	intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
+err_unmap_dma:
+	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+err_free_spt:
+	free_spt(spt);
+	return ERR_PTR(ret);
 }
 
 #define pt_entry_size_shift(spt) \
@@ -857,7 +786,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
 		if (!ppgtt_get_shadow_entry(spt, e, i) && \
 		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 
-static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
 	int v = atomic_read(&spt->refcount);
 
@@ -866,17 +795,16 @@ static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	atomic_inc(&spt->refcount);
 }
 
-static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
 
-static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
+static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
 		struct intel_gvt_gtt_entry *e)
 {
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 	intel_gvt_gtt_type_t cur_pt_type;
 
-	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
-		return -EINVAL;
+	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
 
 	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
 		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -885,16 +813,33 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
 			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
 			return 0;
 	}
-	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
 	if (!s) {
 		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
 				ops->get_pfn(e));
 		return -ENXIO;
 	}
-	return ppgtt_invalidate_shadow_page(s);
+	return ppgtt_invalidate_spt(s);
 }
 
-static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
+		struct intel_gvt_gtt_entry *entry)
+{
+	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	unsigned long pfn;
+	int type;
+
+	pfn = ops->get_pfn(entry);
+	type = spt->shadow_page.type;
+
+	if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
+		return;
+
+	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
+}
+
+static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt_gtt_entry e;
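
This hunk is the "Properly unmap dma for guest page" item from the tag: a guest page gets a DMA mapping when a shadow 4K entry is populated, and ppgtt_invalidate_pte() now releases that mapping when the entry dies (unless it points at the scratch page). The pairing, condensed from this hunk and the populate path further down; not a standalone snippet:

	/* populate: gfn -> dma_addr (ppgtt_populate_shadow_entry) */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);

	/* invalidate: dma_addr -> unmapped (ppgtt_invalidate_pte) */
	pfn = ops->get_pfn(entry);
	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);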
@@ -903,30 +848,40 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	int v = atomic_read(&spt->refcount);
 
 	trace_spt_change(spt->vgpu->id, "die", spt,
-			spt->guest_page.track.gfn, spt->shadow_page.type);
+			spt->guest_page.gfn, spt->shadow_page.type);
 
 	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
 
 	if (atomic_dec_return(&spt->refcount) > 0)
 		return 0;
 
-	if (gtt_type_is_pte_pt(spt->shadow_page.type))
-		goto release;
-
 	for_each_present_shadow_entry(spt, &e, index) {
-		if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
-			gvt_vgpu_err("GVT doesn't support pse bit for now\n");
-			return -EINVAL;
+		switch (e.type) {
+		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+			gvt_vdbg_mm("invalidate 4K entry\n");
+			ppgtt_invalidate_pte(spt, &e);
+			break;
+		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+			WARN(1, "GVT doesn't support 2M/1GB page\n");
+			continue;
+		case GTT_TYPE_PPGTT_PML4_ENTRY:
+		case GTT_TYPE_PPGTT_PDP_ENTRY:
+		case GTT_TYPE_PPGTT_PDE_ENTRY:
+			gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
+			ret = ppgtt_invalidate_spt_by_shadow_entry(
+					spt->vgpu, &e);
+			if (ret)
+				goto fail;
+			break;
+		default:
+			GEM_BUG_ON(1);
 		}
-		ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
-				spt->vgpu, &e);
-		if (ret)
-			goto fail;
 	}
-release:
+
 	trace_spt_change(spt->vgpu->id, "release", spt,
-			spt->guest_page.track.gfn, spt->shadow_page.type);
-	ppgtt_free_shadow_page(spt);
+			 spt->guest_page.gfn, spt->shadow_page.type);
+	ppgtt_free_spt(spt);
 	return 0;
 fail:
 	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
@@ -934,52 +889,44 @@ fail:
 	return ret;
 }
 
-static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
 
-static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
+static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
 {
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
-	struct intel_vgpu_ppgtt_spt *s = NULL;
-	struct intel_vgpu_guest_page *g;
-	struct intel_vgpu_page_track *t;
+	struct intel_vgpu_ppgtt_spt *spt = NULL;
 	int ret;
 
-	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
-		ret = -EINVAL;
-		goto fail;
-	}
+	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
 
-	t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
-	if (t) {
-		g = page_track_to_guest_page(t);
-		s = guest_page_to_ppgtt_spt(g);
-		ppgtt_get_shadow_page(s);
-	} else {
+	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
+	if (spt)
+		ppgtt_get_spt(spt);
+	else {
 		int type = get_next_pt_type(we->type);
 
-		s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
-		if (IS_ERR(s)) {
-			ret = PTR_ERR(s);
+		spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
+		if (IS_ERR(spt)) {
+			ret = PTR_ERR(spt);
 			goto fail;
 		}
 
-		ret = intel_gvt_hypervisor_enable_page_track(vgpu,
-				&s->guest_page.track);
+		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
 		if (ret)
 			goto fail;
 
-		ret = ppgtt_populate_shadow_page(s);
+		ret = ppgtt_populate_spt(spt);
 		if (ret)
 			goto fail;
 
-		trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
-			s->shadow_page.type);
+		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
+				 spt->shadow_page.type);
 	}
-	return s;
+	return spt;
 fail:
 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
-			s, we->val64, we->type);
+		     spt, we->val64, we->type);
 	return ERR_PTR(ret);
 }
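
With the shadow-page hash tables gone, an spt is now found two ways: by guest gfn through the page-track table (a track whose handler is ppgtt_write_protection_handler carries its spt in priv_data), and by shadow mfn through the new per-vGPU radix tree. The radix-tree lifecycle, condensed from ppgtt_alloc_spt()/ppgtt_free_spt() above (the tree itself is presumably set up with INIT_RADIX_TREE() in the vGPU gtt init code, which is outside this page):

	/* ppgtt_alloc_spt(): index by shadow mfn */
	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);

	/* intel_vgpu_find_spt_by_mfn(): keyed lookup, no hash collisions */
	spt = radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);

	/* ppgtt_free_spt(): drop the index before freeing */
	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);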
 
@@ -994,7 +941,44 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 	ops->set_pfn(se, s->shadow_page.mfn);
 }
 
-static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
+	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+	struct intel_gvt_gtt_entry *ge)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_entry se = *ge;
+	unsigned long gfn;
+	dma_addr_t dma_addr;
+	int ret;
+
+	if (!pte_ops->test_present(ge))
+		return 0;
+
+	gfn = pte_ops->get_pfn(ge);
+
+	switch (ge->type) {
+	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+		gvt_vdbg_mm("shadow 4K gtt entry\n");
+		break;
+	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+		gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+		return -EINVAL;
+	default:
+		GEM_BUG_ON(1);
+	}
+
+	/* direct shadow */
+	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
+	if (ret)
+		return -ENXIO;
+
+	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
+	ppgtt_set_shadow_entry(spt, &se, index);
+	return 0;
+}
+
+static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
@@ -1005,34 +989,30 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	int ret;
 
 	trace_spt_change(spt->vgpu->id, "born", spt,
-			spt->guest_page.track.gfn, spt->shadow_page.type);
+			 spt->guest_page.gfn, spt->shadow_page.type);
 
-	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
-		for_each_present_guest_entry(spt, &ge, i) {
+	for_each_present_guest_entry(spt, &ge, i) {
+		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
+			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
+			if (IS_ERR(s)) {
+				ret = PTR_ERR(s);
+				goto fail;
+			}
+			ppgtt_get_shadow_entry(spt, &se, i);
+			ppgtt_generate_shadow_entry(&se, s, &ge);
+			ppgtt_set_shadow_entry(spt, &se, i);
+		} else {
 			gfn = ops->get_pfn(&ge);
-			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
-				gtt_entry_p2m(vgpu, &ge, &se))
+			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
 				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
-			ppgtt_set_shadow_entry(spt, &se, i);
-		}
-		return 0;
-	}
-
-	for_each_present_guest_entry(spt, &ge, i) {
-		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
-			gvt_vgpu_err("GVT doesn't support pse bit now\n");
-			ret = -EINVAL;
-			goto fail;
-		}
+				ppgtt_set_shadow_entry(spt, &se, i);
+				continue;
+			}
 
-		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
-		if (IS_ERR(s)) {
-			ret = PTR_ERR(s);
-			goto fail;
+			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
+			if (ret)
+				goto fail;
 		}
-		ppgtt_get_shadow_entry(spt, &se, i);
-		ppgtt_generate_shadow_entry(&se, s, &ge);
-		ppgtt_set_shadow_entry(spt, &se, i);
 	}
 	return 0;
 fail:
@@ -1041,36 +1021,40 @@ fail:
 	return ret;
 }
 
-static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
+static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
 		struct intel_gvt_gtt_entry *se, unsigned long index)
 {
-	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
-	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
 	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	int ret;
 
-	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
-			 index);
+	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
+			       spt->shadow_page.type, se->val64, index);
+
+	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
+		    se->type, index, se->val64);
 
 	if (!ops->test_present(se))
 		return 0;
 
-	if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
+	if (ops->get_pfn(se) ==
+	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
 		return 0;
 
 	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
 		struct intel_vgpu_ppgtt_spt *s =
-			ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
+			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
 		if (!s) {
 			gvt_vgpu_err("fail to find guest page\n");
 			ret = -ENXIO;
 			goto fail;
 		}
-		ret = ppgtt_invalidate_shadow_page(s);
+		ret = ppgtt_invalidate_spt(s);
 		if (ret)
 			goto fail;
-	}
+	} else
+		ppgtt_invalidate_pte(spt, se);
+
 	return 0;
 fail:
 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
@@ -1078,21 +1062,22 @@ fail:
 	return ret;
 }
 
-static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
+static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
 		struct intel_gvt_gtt_entry *we, unsigned long index)
 {
-	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
-	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
 	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt_gtt_entry m;
 	struct intel_vgpu_ppgtt_spt *s;
 	int ret;
 
-	trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
-		we->val64, index);
+	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
+			       we->val64, index);
+
+	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
+		    we->type, index, we->val64);
 
 	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
-		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
+		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
 		if (IS_ERR(s)) {
 			ret = PTR_ERR(s);
 			goto fail;
@@ -1101,10 +1086,9 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
 		ppgtt_generate_shadow_entry(&m, s, we);
 		ppgtt_set_shadow_entry(spt, &m, index);
 	} else {
-		ret = gtt_entry_p2m(vgpu, we, &m);
+		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
 		if (ret)
 			goto fail;
-		ppgtt_set_shadow_entry(spt, &m, index);
 	}
 	return 0;
 fail:
@@ -1119,41 +1103,39 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
-	struct intel_vgpu_ppgtt_spt *spt =
-		guest_page_to_ppgtt_spt(oos_page->guest_page);
-	struct intel_gvt_gtt_entry old, new, m;
+	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
+	struct intel_gvt_gtt_entry old, new;
 	int index;
 	int ret;
 
 	trace_oos_change(vgpu->id, "sync", oos_page->id,
-			oos_page->guest_page, spt->guest_page_type);
+			 spt, spt->guest_page.type);
 
-	old.type = new.type = get_entry_type(spt->guest_page_type);
+	old.type = new.type = get_entry_type(spt->guest_page.type);
 	old.val64 = new.val64 = 0;
 
 	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
 				info->gtt_entry_size_shift); index++) {
 		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
 		ops->get_entry(NULL, &new, index, true,
-			oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);
+			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);
 
 		if (old.val64 == new.val64
 			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
 			continue;
 
 		trace_oos_sync(vgpu->id, oos_page->id,
-				oos_page->guest_page, spt->guest_page_type,
+				spt, spt->guest_page.type,
 				new.val64, index);
 
-		ret = gtt_entry_p2m(vgpu, &new, &m);
+		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
 		if (ret)
 			return ret;
 
 		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
-		ppgtt_set_shadow_entry(spt, &m, index);
 	}
 
-	oos_page->guest_page->write_cnt = 0;
+	spt->guest_page.write_cnt = 0;
 	list_del_init(&spt->post_shadow_list);
 	return 0;
 }
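
For orientation, the out-of-sync (OOS) machinery these hunks keep touching: a leaf page-table page the guest rewrites repeatedly (write_cnt >= 2, see can_do_out_of_sync() below) has its write protection dropped and its contents snapshotted into an oos_page; when the page table must be trusted again, sync_oos_page() diffs the snapshot against the current guest page and re-shadows only the entries that changed before re-arming write protection. The refactor does not change that logic, only the bookkeeping: oos_page now points straight at the spt instead of at the old guest_page sub-structure.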
@@ -1162,15 +1144,14 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
 		struct intel_vgpu_oos_page *oos_page)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_vgpu_ppgtt_spt *spt =
-		guest_page_to_ppgtt_spt(oos_page->guest_page);
+	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
 
 	trace_oos_change(vgpu->id, "detach", oos_page->id,
-			oos_page->guest_page, spt->guest_page_type);
+			 spt, spt->guest_page.type);
 
-	oos_page->guest_page->write_cnt = 0;
-	oos_page->guest_page->oos_page = NULL;
-	oos_page->guest_page = NULL;
+	spt->guest_page.write_cnt = 0;
+	spt->guest_page.oos_page = NULL;
+	oos_page->spt = NULL;
 
 	list_del_init(&oos_page->vm_list);
 	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
@@ -1178,51 +1159,49 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
 	return 0;
 }
 
-static int attach_oos_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_oos_page *oos_page,
-		struct intel_vgpu_guest_page *gpt)
+static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
+		struct intel_vgpu_ppgtt_spt *spt)
 {
-	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt *gvt = spt->vgpu->gvt;
 	int ret;
 
-	ret = intel_gvt_hypervisor_read_gpa(vgpu,
-			gpt->track.gfn << I915_GTT_PAGE_SHIFT,
+	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
+			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
 			oos_page->mem, I915_GTT_PAGE_SIZE);
 	if (ret)
 		return ret;
 
-	oos_page->guest_page = gpt;
-	gpt->oos_page = oos_page;
+	oos_page->spt = spt;
+	spt->guest_page.oos_page = oos_page;
 
 	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
 
-	trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
-			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
+			 spt, spt->guest_page.type);
 	return 0;
 }
 
-static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *gpt)
+static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
 {
+	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
 	int ret;
 
-	ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
+	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
 	if (ret)
 		return ret;
 
-	trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
-			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
+			 spt, spt->guest_page.type);
 
-	list_del_init(&gpt->oos_page->vm_list);
-	return sync_oos_page(vgpu, gpt->oos_page);
+	list_del_init(&oos_page->vm_list);
+	return sync_oos_page(spt->vgpu, oos_page);
 }
 
-static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *gpt)
+static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
 {
-	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt *gvt = spt->vgpu->gvt;
 	struct intel_gvt_gtt *gtt = &gvt->gtt;
-	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
 	int ret;
 
 	WARN(oos_page, "shadow PPGTT page already has an oos page\n");
@@ -1230,31 +1209,30 @@ static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
 	if (list_empty(&gtt->oos_page_free_list_head)) {
 		oos_page = container_of(gtt->oos_page_use_list_head.next,
 			struct intel_vgpu_oos_page, list);
-		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+		ret = ppgtt_set_guest_page_sync(oos_page->spt);
 		if (ret)
 			return ret;
-		ret = detach_oos_page(vgpu, oos_page);
+		ret = detach_oos_page(spt->vgpu, oos_page);
 		if (ret)
 			return ret;
 	} else
 		oos_page = container_of(gtt->oos_page_free_list_head.next,
 			struct intel_vgpu_oos_page, list);
-	return attach_oos_page(vgpu, oos_page, gpt);
+	return attach_oos_page(oos_page, spt);
 }
 
-static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *gpt)
+static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
 {
-	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
 
 	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
 		return -EINVAL;
 
-	trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
-			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
+			 spt, spt->guest_page.type);
 
-	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
-	return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
+	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
+	return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
 }
 
 /**
@@ -1279,7 +1257,7 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
 	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
 		oos_page = container_of(pos,
 				struct intel_vgpu_oos_page, vm_list);
-		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+		ret = ppgtt_set_guest_page_sync(oos_page->spt);
 		if (ret)
 			return ret;
 	}
@@ -1290,17 +1268,15 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
  * The heart of PPGTT shadow page table.
  */
 static int ppgtt_handle_guest_write_page_table(
-		struct intel_vgpu_guest_page *gpt,
+		struct intel_vgpu_ppgtt_spt *spt,
 		struct intel_gvt_gtt_entry *we, unsigned long index)
 {
-	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
 	struct intel_vgpu *vgpu = spt->vgpu;
 	int type = spt->shadow_page.type;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
-	struct intel_gvt_gtt_entry se;
-
-	int ret;
+	struct intel_gvt_gtt_entry old_se;
 	int new_present;
+	int ret;
 
 	new_present = ops->test_present(we);
 
@@ -1309,21 +1285,21 @@ static int ppgtt_handle_guest_write_page_table(
 	 * guarantee the ppgtt table is validated during the window between
 	 * adding and removal.
 	 */
-	ppgtt_get_shadow_entry(spt, &se, index);
+	ppgtt_get_shadow_entry(spt, &old_se, index);
 
 	if (new_present) {
-		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
+		ret = ppgtt_handle_guest_entry_add(spt, we, index);
 		if (ret)
 			goto fail;
 	}
 
-	ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
+	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
 	if (ret)
 		goto fail;
 
 	if (!new_present) {
-		ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
-		ppgtt_set_shadow_entry(spt, &se, index);
+		ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
+		ppgtt_set_shadow_entry(spt, &old_se, index);
 	}
 
 	return 0;
@@ -1333,12 +1309,13 @@ fail:
 	return ret;
 }
 
-static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
+
+
+static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
 {
 	return enable_out_of_sync
-		&& gtt_type_is_pte_pt(
-			guest_page_to_ppgtt_spt(gpt)->guest_page_type)
-		&& gpt->write_cnt >= 2;
+		&& gtt_type_is_pte_pt(spt->guest_page.type)
+		&& spt->guest_page.write_cnt >= 2;
 }
 
 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
@@ -1378,8 +1355,8 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
 				GTT_ENTRY_NUM_IN_ONE_PAGE) {
 			ppgtt_get_guest_entry(spt, &ge, index);
 
-			ret = ppgtt_handle_guest_write_page_table(
-					&spt->guest_page, &ge, index);
+			ret = ppgtt_handle_guest_write_page_table(spt,
+							&ge, index);
 			if (ret)
 				return ret;
 			clear_bit(index, spt->post_shadow_bitmap);
@@ -1390,10 +1367,9 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
 }
 
 static int ppgtt_handle_guest_write_page_table_bytes(
-		struct intel_vgpu_guest_page *gpt,
+		struct intel_vgpu_ppgtt_spt *spt,
 		u64 pa, void *p_data, int bytes)
 {
-	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
 	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
@@ -1408,7 +1384,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(
 	ops->test_pse(&we);
 
 	if (bytes == info->gtt_entry_size) {
-		ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
+		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
 		if (ret)
 			return ret;
 	} else {
@@ -1416,7 +1392,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(
 			int type = spt->shadow_page.type;
 
 			ppgtt_get_shadow_entry(spt, &se, index);
-			ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
+			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
 			if (ret)
 				return ret;
 			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
@@ -1428,128 +1404,54 @@ static int ppgtt_handle_guest_write_page_table_bytes(
 	if (!enable_out_of_sync)
 		return 0;
 
-	gpt->write_cnt++;
+	spt->guest_page.write_cnt++;
 
-	if (gpt->oos_page)
-		ops->set_entry(gpt->oos_page->mem, &we, index,
+	if (spt->guest_page.oos_page)
+		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
 				false, 0, vgpu);
 
-	if (can_do_out_of_sync(gpt)) {
-		if (!gpt->oos_page)
-			ppgtt_allocate_oos_page(vgpu, gpt);
+	if (can_do_out_of_sync(spt)) {
+		if (!spt->guest_page.oos_page)
+			ppgtt_allocate_oos_page(spt);
 
-		ret = ppgtt_set_guest_page_oos(vgpu, gpt);
+		ret = ppgtt_set_guest_page_oos(spt);
 		if (ret < 0)
 			return ret;
 	}
 	return 0;
 }
 
-/*
- * mm page table allocation policy for bdw+
- *  - for ggtt, only virtual page table will be allocated.
- *  - for ppgtt, dedicated virtual/shadow page table will be allocated.
- */
-static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
-{
-	struct intel_vgpu *vgpu = mm->vgpu;
-	struct intel_gvt *gvt = vgpu->gvt;
-	const struct intel_gvt_device_info *info = &gvt->device_info;
-	void *mem;
-
-	if (mm->type == INTEL_GVT_MM_PPGTT) {
-		mm->page_table_entry_cnt = 4;
-		mm->page_table_entry_size = mm->page_table_entry_cnt *
-			info->gtt_entry_size;
-		mem = kzalloc(mm->has_shadow_page_table ?
-			mm->page_table_entry_size * 2
-				: mm->page_table_entry_size, GFP_KERNEL);
-		if (!mem)
-			return -ENOMEM;
-		mm->virtual_page_table = mem;
-		if (!mm->has_shadow_page_table)
-			return 0;
-		mm->shadow_page_table = mem + mm->page_table_entry_size;
-	} else if (mm->type == INTEL_GVT_MM_GGTT) {
-		mm->page_table_entry_cnt =
-			(gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
-		mm->page_table_entry_size = mm->page_table_entry_cnt *
-			info->gtt_entry_size;
-		mem = vzalloc(mm->page_table_entry_size);
-		if (!mem)
-			return -ENOMEM;
-		mm->virtual_page_table = mem;
-	}
-	return 0;
-}
-
-static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
-{
-	if (mm->type == INTEL_GVT_MM_PPGTT) {
-		kfree(mm->virtual_page_table);
-	} else if (mm->type == INTEL_GVT_MM_GGTT) {
-		if (mm->virtual_page_table)
-			vfree(mm->virtual_page_table);
-	}
-	mm->virtual_page_table = mm->shadow_page_table = NULL;
-}
-
-static void invalidate_mm(struct intel_vgpu_mm *mm)
+static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
 {
 	struct intel_vgpu *vgpu = mm->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_gtt *gtt = &gvt->gtt;
 	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
 	struct intel_gvt_gtt_entry se;
-	int i;
+	int index;
 
-	if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
+	if (!mm->ppgtt_mm.shadowed)
 		return;
 
-	for (i = 0; i < mm->page_table_entry_cnt; i++) {
-		ppgtt_get_shadow_root_entry(mm, &se, i);
+	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
+		ppgtt_get_shadow_root_entry(mm, &se, index);
+
 		if (!ops->test_present(&se))
 			continue;
-		ppgtt_invalidate_shadow_page_by_shadow_entry(
-				vgpu, &se);
+
+		ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
 		se.val64 = 0;
-		ppgtt_set_shadow_root_entry(mm, &se, i);
+		ppgtt_set_shadow_root_entry(mm, &se, index);
 
-		trace_gpt_change(vgpu->id, "destroy root pointer",
-				NULL, se.type, se.val64, i);
+		trace_spt_guest_change(vgpu->id, "destroy root pointer",
+				       NULL, se.type, se.val64, index);
 	}
-	mm->shadowed = false;
-}
 
-/**
- * intel_vgpu_destroy_mm - destroy a mm object
- * @mm: a kref object
- *
- * This function is used to destroy a mm object for vGPU
- *
- */
-void intel_vgpu_destroy_mm(struct kref *mm_ref)
-{
-	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
-	struct intel_vgpu *vgpu = mm->vgpu;
-	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_gtt *gtt = &gvt->gtt;
-
-	if (!mm->initialized)
-		goto out;
-
-	list_del(&mm->list);
-	list_del(&mm->lru_list);
-
-	if (mm->has_shadow_page_table)
-		invalidate_mm(mm);
-
-	gtt->mm_free_page_table(mm);
-out:
-	kfree(mm);
+	mm->ppgtt_mm.shadowed = false;
 }
 
-static int shadow_mm(struct intel_vgpu_mm *mm)
+
+static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
 {
 	struct intel_vgpu *vgpu = mm->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
@@ -1557,119 +1459,155 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
 	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
 	struct intel_vgpu_ppgtt_spt *spt;
 	struct intel_gvt_gtt_entry ge, se;
-	int i;
-	int ret;
+	int index, ret;
 
-	if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
+	if (mm->ppgtt_mm.shadowed)
 		return 0;
 
-	mm->shadowed = true;
+	mm->ppgtt_mm.shadowed = true;
+
+	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
+		ppgtt_get_guest_root_entry(mm, &ge, index);
 
-	for (i = 0; i < mm->page_table_entry_cnt; i++) {
-		ppgtt_get_guest_root_entry(mm, &ge, i);
 		if (!ops->test_present(&ge))
 			continue;
 
-		trace_gpt_change(vgpu->id, __func__, NULL,
-				ge.type, ge.val64, i);
+		trace_spt_guest_change(vgpu->id, __func__, NULL,
+				       ge.type, ge.val64, index);
 
-		spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+		spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
 		if (IS_ERR(spt)) {
 			gvt_vgpu_err("fail to populate guest root pointer\n");
 			ret = PTR_ERR(spt);
 			goto fail;
 		}
 		ppgtt_generate_shadow_entry(&se, spt, &ge);
-		ppgtt_set_shadow_root_entry(mm, &se, i);
+		ppgtt_set_shadow_root_entry(mm, &se, index);
 
-		trace_gpt_change(vgpu->id, "populate root pointer",
-				NULL, se.type, se.val64, i);
+		trace_spt_guest_change(vgpu->id, "populate root pointer",
+				       NULL, se.type, se.val64, index);
 	}
+
 	return 0;
 fail:
-	invalidate_mm(mm);
+	invalidate_ppgtt_mm(mm);
 	return ret;
 }
 
+static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
+{
+	struct intel_vgpu_mm *mm;
+
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+	if (!mm)
+		return NULL;
+
+	mm->vgpu = vgpu;
+	kref_init(&mm->ref);
+	atomic_set(&mm->pincount, 0);
+
+	return mm;
+}
+
+static void vgpu_free_mm(struct intel_vgpu_mm *mm)
+{
+	kfree(mm);
+}
+
 /**
- * intel_vgpu_create_mm - create a mm object for a vGPU
+ * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
  * @vgpu: a vGPU
- * @mm_type: mm object type, should be PPGTT or GGTT
- * @virtual_page_table: page table root pointers. Could be NULL if user wants
- *	to populate shadow later.
- * @page_table_level: describe the page table level of the mm object
- * @pde_base_index: pde root pointer base in GGTT MMIO.
+ * @root_entry_type: ppgtt root entry type
+ * @pdps: guest pdps.
  *
- * This function is used to create a mm object for a vGPU.
+ * This function is used to create a ppgtt mm object for a vGPU.
  *
  * Returns:
  * Zero on success, negative error code in pointer if failed.
  */
-struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
-		int mm_type, void *virtual_page_table, int page_table_level,
-		u32 pde_base_index)
+struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_gtt *gtt = &gvt->gtt;
 	struct intel_vgpu_mm *mm;
 	int ret;
 
-	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-	if (!mm) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	mm = vgpu_alloc_mm(vgpu);
+	if (!mm)
+		return ERR_PTR(-ENOMEM);
 
-	mm->type = mm_type;
+	mm->type = INTEL_GVT_MM_PPGTT;
 
-	if (page_table_level == 1)
-		mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
-	else if (page_table_level == 3)
-		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
-	else if (page_table_level == 4)
-		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
-	else {
-		WARN_ON(1);
-		ret = -EINVAL;
-		goto fail;
-	}
+	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
+		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
+	mm->ppgtt_mm.root_entry_type = root_entry_type;
 
-	mm->page_table_level = page_table_level;
-	mm->pde_base_index = pde_base_index;
+	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
+	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
 
-	mm->vgpu = vgpu;
-	mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
-
-	kref_init(&mm->ref);
-	atomic_set(&mm->pincount, 0);
-	INIT_LIST_HEAD(&mm->list);
-	INIT_LIST_HEAD(&mm->lru_list);
-	list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
+	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
+		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
+	else
+		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
+		       sizeof(mm->ppgtt_mm.guest_pdps));
 
-	ret = gtt->mm_alloc_page_table(mm);
+	ret = shadow_ppgtt_mm(mm);
 	if (ret) {
-		gvt_vgpu_err("fail to allocate page table for mm\n");
-		goto fail;
+		gvt_vgpu_err("failed to shadow ppgtt mm\n");
+		vgpu_free_mm(mm);
+		return ERR_PTR(ret);
 	}
 
-	mm->initialized = true;
+	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+	return mm;
+}
 
-	if (virtual_page_table)
-		memcpy(mm->virtual_page_table, virtual_page_table,
-				mm->page_table_entry_size);
+static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
+{
+	struct intel_vgpu_mm *mm;
+	unsigned long nr_entries;
 
-	if (mm->has_shadow_page_table) {
-		ret = shadow_mm(mm);
-		if (ret)
-			goto fail;
-		list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
+	mm = vgpu_alloc_mm(vgpu);
+	if (!mm)
+		return ERR_PTR(-ENOMEM);
+
+	mm->type = INTEL_GVT_MM_GGTT;
+
+	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
+	mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
+					vgpu->gvt->device_info.gtt_entry_size);
+	if (!mm->ggtt_mm.virtual_ggtt) {
+		vgpu_free_mm(mm);
+		return ERR_PTR(-ENOMEM);
 	}
+
 	return mm;
-fail:
-	gvt_vgpu_err("fail to create mm\n");
-	if (mm)
-		intel_gvt_mm_unreference(mm);
-	return ERR_PTR(ret);
+}
+
+/**
+ * _intel_vgpu_mm_release - destroy a mm object
+ * @mm_ref: a kref object
+ *
+ * This function is used to destroy a mm object for vGPU
+ *
+ */
+void _intel_vgpu_mm_release(struct kref *mm_ref)
+{
+	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
+
+	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
+		gvt_err("vgpu mm pin count bug detected\n");
+
+	if (mm->type == INTEL_GVT_MM_PPGTT) {
+		list_del(&mm->ppgtt_mm.list);
+		list_del(&mm->ppgtt_mm.lru_list);
+		invalidate_ppgtt_mm(mm);
+	} else {
+		vfree(mm->ggtt_mm.virtual_ggtt);
+	}
+
+	vgpu_free_mm(mm);
 }
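
_intel_vgpu_mm_release() is the kref destructor, so callers are expected to use get/put wrappers rather than invoke it directly. A sketch of what those wrappers presumably look like (the real ones would live in gtt.h, which is not shown on this page):

	static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
	{
		kref_get(&mm->ref);
	}

	static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
	{
		kref_put(&mm->ref, _intel_vgpu_mm_release);
	}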
 
 /**
@@ -1680,9 +1618,6 @@ fail:
  */
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 {
-	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
-		return;
-
 	atomic_dec(&mm->pincount);
 }
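
pincount is what keeps a shadow PPGTT alive across reclaim: intel_vgpu_pin_mm() below raises it and re-runs shadow_ppgtt_mm() in case an earlier reclaim invalidated the mm (shadow_ppgtt_mm() is a no-op while ppgtt_mm.shadowed is set), while reclaim_one_ppgtt_mm() skips any mm with a non-zero pincount when it has to invalidate a victim to free shadow pages for ppgtt_alloc_spt().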
 
@@ -1701,36 +1636,34 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
 {
 	int ret;
 
-	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
-		return 0;
+	atomic_inc(&mm->pincount);
 
-	if (!mm->shadowed) {
-		ret = shadow_mm(mm);
+	if (mm->type == INTEL_GVT_MM_PPGTT) {
+		ret = shadow_ppgtt_mm(mm);
 		if (ret)
 			return ret;
+
+		list_move_tail(&mm->ppgtt_mm.lru_list,
+			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
+
 	}
 
-	atomic_inc(&mm->pincount);
-	list_del_init(&mm->lru_list);
-	list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
 	return 0;
 }
 
-static int reclaim_one_mm(struct intel_gvt *gvt)
+static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
 {
 	struct intel_vgpu_mm *mm;
 	struct list_head *pos, *n;
 
-	list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
-		mm = container_of(pos, struct intel_vgpu_mm, lru_list);
+	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
-		if (mm->type != INTEL_GVT_MM_PPGTT)
-			continue;
 		if (atomic_read(&mm->pincount))
 			continue;
 
-		list_del_init(&mm->lru_list);
-		invalidate_mm(mm);
+		list_del_init(&mm->ppgtt_mm.lru_list);
+		invalidate_ppgtt_mm(mm);
 		return 1;
 	}
 	return 0;
@@ -1746,10 +1679,7 @@ static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 
-	if (WARN_ON(!mm->has_shadow_page_table))
-		return -EINVAL;
-
-	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
 	if (!s)
 		return -ENXIO;
 
@@ -1780,85 +1710,72 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
 	unsigned long gma_index[4];
 	struct intel_gvt_gtt_entry e;
-	int i, index;
+	int i, levels = 0;
 	int ret;
 
-	if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
-		return INTEL_GVT_INVALID_ADDR;
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
+		   mm->type != INTEL_GVT_MM_PPGTT);
 
 	if (mm->type == INTEL_GVT_MM_GGTT) {
 		if (!vgpu_gmadr_is_valid(vgpu, gma))
 			goto err;
 
-		ret = ggtt_get_guest_entry(mm, &e,
-				gma_ops->gma_to_ggtt_pte_index(gma));
-		if (ret)
-			goto err;
+		ggtt_get_guest_entry(mm, &e,
+			gma_ops->gma_to_ggtt_pte_index(gma));
+
 		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
 			+ (gma & ~I915_GTT_PAGE_MASK);
 
 		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
-		return gpa;
-	}
-
-	switch (mm->page_table_level) {
-	case 4:
-		ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
-		if (ret)
-			goto err;
-		gma_index[0] = gma_ops->gma_to_pml4_index(gma);
-		gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
-		gma_index[2] = gma_ops->gma_to_pde_index(gma);
-		gma_index[3] = gma_ops->gma_to_pte_index(gma);
-		index = 4;
-		break;
-	case 3:
-		ret = ppgtt_get_shadow_root_entry(mm, &e,
-				gma_ops->gma_to_l3_pdp_index(gma));
-		if (ret)
-			goto err;
-		gma_index[0] = gma_ops->gma_to_pde_index(gma);
-		gma_index[1] = gma_ops->gma_to_pte_index(gma);
-		index = 2;
-		break;
-	case 2:
-		ret = ppgtt_get_shadow_root_entry(mm, &e,
-				gma_ops->gma_to_pde_index(gma));
-		if (ret)
-			goto err;
-		gma_index[0] = gma_ops->gma_to_pte_index(gma);
-		index = 1;
-		break;
-	default:
-		WARN_ON(1);
-		goto err;
-	}
+	} else {
+		switch (mm->ppgtt_mm.root_entry_type) {
+		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
+			ppgtt_get_shadow_root_entry(mm, &e, 0);
+
+			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
+			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
+			gma_index[2] = gma_ops->gma_to_pde_index(gma);
+			gma_index[3] = gma_ops->gma_to_pte_index(gma);
+			levels = 4;
+			break;
+		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
+			ppgtt_get_shadow_root_entry(mm, &e,
+					gma_ops->gma_to_l3_pdp_index(gma));
+
+			gma_index[0] = gma_ops->gma_to_pde_index(gma);
+			gma_index[1] = gma_ops->gma_to_pte_index(gma);
+			levels = 2;
+			break;
+		default:
+			GEM_BUG_ON(1);
+		}
 
-	/* walk into the shadow page table and get gpa from guest entry */
-	for (i = 0; i < index; i++) {
-		ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
-			(i == index - 1));
-		if (ret)
-			goto err;
+		/* walk the shadow page table and get gpa from guest entry */
+		for (i = 0; i < levels; i++) {
+			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
+				(i == levels - 1));
+			if (ret)
+				goto err;
 
-		if (!pte_ops->test_present(&e)) {
-			gvt_dbg_core("GMA 0x%lx is not present\n", gma);
-			goto err;
+			if (!pte_ops->test_present(&e)) {
+				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
+				goto err;
+			}
 		}
-	}
 
-	gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
-		+ (gma & ~I915_GTT_PAGE_MASK);
+		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
+					(gma & ~I915_GTT_PAGE_MASK);
+		trace_gma_translate(vgpu->id, "ppgtt", 0,
+				    mm->ppgtt_mm.root_entry_type, gma, gpa);
+	}
 
-	trace_gma_translate(vgpu->id, "ppgtt", 0,
-			mm->page_table_level, gma, gpa);
 	return gpa;
 err:
 	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
 	return INTEL_GVT_INVALID_ADDR;
 }
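
The gma_to_*_index helpers used in the L4 walk slice consecutive 9-bit index fields out of the graphics memory address above the 4KB page offset. A standalone sketch of that decomposition, assuming the standard gen8 4-level layout (helper names are mine, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* gen8 4-level layout: 9 index bits per level above the 4KB page offset */
#define GTT_PAGE_SHIFT 12
#define IDX_MASK       ((1u << 9) - 1)

static unsigned pte_index(uint64_t gma)  { return (gma >> GTT_PAGE_SHIFT) & IDX_MASK; }
static unsigned pde_index(uint64_t gma)  { return (gma >> (GTT_PAGE_SHIFT + 9)) & IDX_MASK; }
static unsigned pdp_index(uint64_t gma)  { return (gma >> (GTT_PAGE_SHIFT + 18)) & IDX_MASK; }
static unsigned pml4_index(uint64_t gma) { return (gma >> (GTT_PAGE_SHIFT + 27)) & IDX_MASK; }

int main(void)
{
	uint64_t gma = 0x0000123456789abcULL;

	printf("pml4=%u pdp=%u pde=%u pte=%u offset=0x%llx\n",
	       pml4_index(gma), pdp_index(gma), pde_index(gma),
	       pte_index(gma), (unsigned long long)(gma & 0xfff));
	return 0;
}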
 
-static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes)
 {
 	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
@@ -1887,7 +1804,7 @@ static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
  * Returns:
  * Zero on success, error code if failed.
  */
-int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
 	void *p_data, unsigned int bytes)
 {
 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
@@ -1897,11 +1814,11 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
 		return -EINVAL;
 
 	off -= info->gtt_start_offset;
-	ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
+	ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
 	return ret;
 }
 
-static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
@@ -1911,6 +1828,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
 	unsigned long gma, gfn;
 	struct intel_gvt_gtt_entry e, m;
+	dma_addr_t dma_addr;
 	int ret;
 
 	if (bytes != 4 && bytes != 8)
@@ -1926,6 +1844,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 
 	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
 			bytes);
+	m = e;
 
 	if (ops->test_present(&e)) {
 		gfn = ops->get_pfn(&e);
@@ -1938,29 +1857,29 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			goto out;
 		}
 
-		ret = gtt_entry_p2m(vgpu, &e, &m);
+		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
+							      &dma_addr);
 		if (ret) {
-			gvt_vgpu_err("fail to translate guest gtt entry\n");
+			gvt_vgpu_err("fail to populate guest ggtt entry\n");
 			/* guest driver may read/write the entry when partially
 			 * updating it; in this situation the mapping will fail,
 			 * so set the shadow entry to point to a scratch page
 			 */
 			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
-		}
-	} else {
-		m = e;
+		} else
+			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
+	} else
 		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
-	}
 
 out:
-	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
-	gtt_invalidate(gvt->dev_priv);
+	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
+	ggtt_invalidate(gvt->dev_priv);
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
 	return 0;
 }
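
The memcpy near the top of emulate_ggtt_mmio_write is what lets a guest update one 8-byte GGTT entry with two 4-byte MMIO writes: each write patches its half of a staging copy before the entry is validated. A compact sketch of that partial-update merge (little-endian host assumed; names are illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Merge a 4- or 8-byte write at MMIO offset 'off' into the 64-bit entry. */
static void merge_pte_write(uint64_t *entry, unsigned off,
			    const void *data, unsigned bytes)
{
	memcpy((char *)entry + (off & 7), data, bytes); /* off & (entry_size - 1) */
}

int main(void)
{
	uint64_t pte = 0;
	uint32_t lo = 0x89abcdef, hi = 0x01234567;

	merge_pte_write(&pte, 0x10, &lo, 4);  /* guest writes the low dword  */
	merge_pte_write(&pte, 0x14, &hi, 4);  /* then the high dword         */
	printf("assembled pte = 0x%016llx\n", (unsigned long long)pte);
	return 0;
}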
 
 /*
- * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
+ * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
  * @vgpu: a vGPU
  * @off: register offset
  * @p_data: data from guest write
@@ -1971,8 +1890,8 @@ out:
  * Returns:
  * Zero on success, error code if failed.
  */
-int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
-	void *p_data, unsigned int bytes)
+int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int off, void *p_data, unsigned int bytes)
 {
 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
 	int ret;
@@ -1981,43 +1900,10 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		return -EINVAL;
 
 	off -= info->gtt_start_offset;
-	ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
+	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
 	return ret;
 }
 
-int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
-				     void *p_data, unsigned int bytes)
-{
-	struct intel_gvt *gvt = vgpu->gvt;
-	int ret = 0;
-
-	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		mutex_lock(&gvt->lock);
-
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			if (unlikely(vgpu->failsafe)) {
-				/* remove write protection to prevent furture traps */
-				intel_vgpu_clean_page_track(vgpu, t);
-			} else {
-				ret = t->handler(t, pa, p_data, bytes);
-				if (ret) {
-					gvt_err("guest page write error %d, "
-						"gfn 0x%lx, pa 0x%llx, "
-						"var 0x%x, len %d\n",
-						ret, t->gfn, pa,
-						*(u32 *)p_data, bytes);
-				}
-			}
-		}
-		mutex_unlock(&gvt->lock);
-	}
-	return ret;
-}
-
-
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		intel_gvt_gtt_type_t type)
 {
@@ -2131,45 +2017,49 @@ err:
 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
-	struct intel_vgpu_mm *ggtt_mm;
 
-	hash_init(gtt->tracked_guest_page_hash_table);
-	hash_init(gtt->shadow_page_hash_table);
+	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
 
-	INIT_LIST_HEAD(&gtt->mm_list_head);
+	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
 	INIT_LIST_HEAD(&gtt->oos_page_list_head);
 	INIT_LIST_HEAD(&gtt->post_shadow_list_head);
 
-	intel_vgpu_reset_ggtt(vgpu);
-
-	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
-			NULL, 1, 0);
-	if (IS_ERR(ggtt_mm)) {
+	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
+	if (IS_ERR(gtt->ggtt_mm)) {
 		gvt_vgpu_err("fail to create mm for ggtt.\n");
-		return PTR_ERR(ggtt_mm);
+		return PTR_ERR(gtt->ggtt_mm);
 	}
 
-	gtt->ggtt_mm = ggtt_mm;
+	intel_vgpu_reset_ggtt(vgpu);
 
 	return create_scratch_page_tree(vgpu);
 }
 
-static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
+static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 {
 	struct list_head *pos, *n;
 	struct intel_vgpu_mm *mm;
 
-	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
-		mm = container_of(pos, struct intel_vgpu_mm, list);
-		if (mm->type == type) {
-			vgpu->gvt->gtt.mm_free_page_table(mm);
-			list_del(&mm->list);
-			list_del(&mm->lru_list);
-			kfree(mm);
-		}
+	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
+		intel_vgpu_destroy_mm(mm);
+	}
+
+	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
+		gvt_err("vgpu ppgtt mm is not fully destroyed\n");
+
+	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
+		gvt_err("vgpu still has spt not freed\n");
+		ppgtt_free_all_spt(vgpu);
 	}
 }
 
+static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
+{
+	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
+	vgpu->gtt.ggtt_mm = NULL;
+}
+
 /**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
  * @vgpu: a vGPU
@@ -2182,11 +2072,9 @@ static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
  */
 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
 {
-	ppgtt_free_all_shadow_page(vgpu);
+	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+	intel_vgpu_destroy_ggtt_mm(vgpu);
 	release_scratch_page_tree(vgpu);
-
-	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
-	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
 }
 
 static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2248,99 +2136,78 @@ fail:
  * pointer to mm object on success, NULL if failed.
  */
 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level, void *root_entry)
+		u64 pdps[])
 {
-	struct list_head *pos;
 	struct intel_vgpu_mm *mm;
-	u64 *src, *dst;
-
-	list_for_each(pos, &vgpu->gtt.mm_list_head) {
-		mm = container_of(pos, struct intel_vgpu_mm, list);
-		if (mm->type != INTEL_GVT_MM_PPGTT)
-			continue;
-
-		if (mm->page_table_level != page_table_level)
-			continue;
+	struct list_head *pos;
 
-		src = root_entry;
-		dst = mm->virtual_page_table;
+	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
 
-		if (page_table_level == 3) {
-			if (src[0] == dst[0]
-					&& src[1] == dst[1]
-					&& src[2] == dst[2]
-					&& src[3] == dst[3])
+		switch (mm->ppgtt_mm.root_entry_type) {
+		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
+			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
 				return mm;
-		} else {
-			if (src[0] == dst[0])
+			break;
+		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
+			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
+				    sizeof(mm->ppgtt_mm.guest_pdps)))
 				return mm;
+			break;
+		default:
+			GEM_BUG_ON(1);
 		}
 	}
 	return NULL;
 }
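
Note the asymmetry in the lookup above: a 4-level mm is keyed only by the PML4 pointer in pdps[0], while a 3-level mm must match all four PDPs. A sketch of that comparison with simplified types:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

enum root_type { ROOT_L3, ROOT_L4 };

struct ppgtt_key {
	enum root_type type;
	uint64_t pdps[4];
};

static bool ppgtt_key_match(const struct ppgtt_key *mm, const uint64_t pdps[4])
{
	if (mm->type == ROOT_L4)                /* one PML4 root pointer */
		return pdps[0] == mm->pdps[0];
	return !memcmp(pdps, mm->pdps,          /* all four PDPs must match */
		       sizeof(mm->pdps));
}

int main(void)
{
	struct ppgtt_key mm = { ROOT_L4, { 0x1000, 0, 0, 0 } };
	uint64_t pdps[4] = { 0x1000, 0xdead, 0, 0 };

	return !ppgtt_key_match(&mm, pdps);   /* matches: L4 ignores pdps[1..3] */
}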
 
 /**
- * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
- * g2v notification
+ * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
  * @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
+ * @root_entry_type: ppgtt root entry type
+ * @pdps: guest pdps
  *
- * This function is used to create a PPGTT mm object from a guest to GVT-g
- * notification.
+ * This function is used to find or create a PPGTT mm object from a guest.
  *
  * Returns:
 * The mm object on success, an ERR_PTR on failure.
  */
-int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level)
+struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 
-	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
-		return -EINVAL;
-
-	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
 	if (mm) {
-		intel_gvt_mm_reference(mm);
+		intel_vgpu_mm_get(mm);
 	} else {
-		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
-				pdp, page_table_level, 0);
-		if (IS_ERR(mm)) {
+		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
+		if (IS_ERR(mm))
 			gvt_vgpu_err("fail to create mm\n");
-			return PTR_ERR(mm);
-		}
 	}
-	return 0;
+	return mm;
 }
 
 /**
- * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
- * g2v notification
+ * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
  * @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
+ * @pdps: guest pdps
  *
- * This function is used to create a PPGTT mm object from a guest to GVT-g
- * notification.
+ * This function is used to find a PPGTT mm object from a guest and drop a
+ * reference on it.
  *
  * Returns:
  * Zero on success, negative error code if failed.
  */
-int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level)
+int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 
-	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
-		return -EINVAL;
-
-	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
 	if (!mm) {
 		gvt_vgpu_err("fail to find ppgtt instance.\n");
 		return -EINVAL;
 	}
-	intel_gvt_mm_unreference(mm);
+	intel_vgpu_mm_put(mm);
 	return 0;
 }
 
@@ -2367,8 +2234,6 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		|| IS_KABYLAKE(gvt->dev_priv)) {
 		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
 		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
-		gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
-		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
 	} else {
 		return -ENODEV;
 	}
@@ -2399,7 +2264,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 			return ret;
 		}
 	}
-	INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
+	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
 	return 0;
 }
 
@@ -2437,28 +2302,25 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
 	u32 index;
-	u32 offset;
 	u32 num_entries;
-	struct intel_gvt_gtt_entry e;
 
-	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
-	e.type = GTT_TYPE_GGTT_PTE;
-	ops->set_pfn(&e, gvt->gtt.scratch_mfn);
-	e.val64 |= _PAGE_PRESENT;
+	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
+	pte_ops->set_present(&entry);
 
 	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
 	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
-	for (offset = 0; offset < num_entries; offset++)
-		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+	while (num_entries--)
+		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
 
 	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
 	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
-	for (offset = 0; offset < num_entries; offset++)
-		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+	while (num_entries--)
+		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
 
-	gtt_invalidate(dev_priv);
+	ggtt_invalidate(dev_priv);
 }
 
 /**
@@ -2471,13 +2333,10 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 {
-	ppgtt_free_all_shadow_page(vgpu);
-
 	/* Shadow pages are only created when there is no page
 	 * table tracking data, so remove page tracking data after
 	 * removing the shadow pages.
 	 */
-	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
-
+	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
 	intel_vgpu_reset_ggtt(vgpu);
 }

+ 69 - 120
drivers/gpu/drm/i915/gvt/gtt.h

@@ -39,7 +39,6 @@
 
 struct intel_vgpu_mm;
 
-#define INTEL_GVT_GTT_HASH_BITS 8
 #define INTEL_GVT_INVALID_ADDR (~0UL)
 
 struct intel_gvt_gtt_entry {
@@ -84,17 +83,12 @@ struct intel_gvt_gtt {
 	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
-	struct list_head mm_lru_list_head;
+	struct list_head ppgtt_mm_lru_list_head;
 
 	struct page *scratch_page;
 	unsigned long scratch_mfn;
 };
 
-enum {
-	INTEL_GVT_MM_GGTT = 0,
-	INTEL_GVT_MM_PPGTT,
-};
-
 typedef enum {
 	GTT_TYPE_INVALID = -1,
 
@@ -125,66 +119,60 @@ typedef enum {
 	GTT_TYPE_MAX,
 } intel_gvt_gtt_type_t;
 
-struct intel_vgpu_mm {
-	int type;
-	bool initialized;
-	bool shadowed;
+enum intel_gvt_mm_type {
+	INTEL_GVT_MM_GGTT,
+	INTEL_GVT_MM_PPGTT,
+};
 
-	int page_table_entry_type;
-	u32 page_table_entry_size;
-	u32 page_table_entry_cnt;
-	void *virtual_page_table;
-	void *shadow_page_table;
+#define GVT_RING_CTX_NR_PDPS	GEN8_3LVL_PDPES
 
-	int page_table_level;
-	bool has_shadow_page_table;
-	u32 pde_base_index;
+struct intel_vgpu_mm {
+	enum intel_gvt_mm_type type;
+	struct intel_vgpu *vgpu;
 
-	struct list_head list;
 	struct kref ref;
 	atomic_t pincount;
-	struct list_head lru_list;
-	struct intel_vgpu *vgpu;
-};
-
-extern int intel_vgpu_mm_get_entry(
-		struct intel_vgpu_mm *mm,
-		void *page_table, struct intel_gvt_gtt_entry *e,
-		unsigned long index);
 
-extern int intel_vgpu_mm_set_entry(
-		struct intel_vgpu_mm *mm,
-		void *page_table, struct intel_gvt_gtt_entry *e,
-		unsigned long index);
-
-#define ggtt_get_guest_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
-
-#define ggtt_set_guest_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
-
-#define ggtt_get_shadow_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
-
-#define ggtt_set_shadow_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+	union {
+		struct {
+			intel_gvt_gtt_type_t root_entry_type;
+			/*
+			 * The 4 PDPs in ring context. For 48bit addressing,
+			 * only PDP0 is valid and points to PML4. For 32bit
+			 * addressing, all 4 are used as true PDPs.
+			 */
+			u64 guest_pdps[GVT_RING_CTX_NR_PDPS];
+			u64 shadow_pdps[GVT_RING_CTX_NR_PDPS];
+			bool shadowed;
+
+			struct list_head list;
+			struct list_head lru_list;
+		} ppgtt_mm;
+		struct {
+			void *virtual_ggtt;
+		} ggtt_mm;
+	};
+};
 
-#define ppgtt_get_guest_root_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
 
-#define ppgtt_set_guest_root_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
+{
+	kref_get(&mm->ref);
+}
 
-#define ppgtt_get_shadow_root_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+void _intel_vgpu_mm_release(struct kref *mm_ref);
 
-#define ppgtt_set_shadow_root_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
+{
+	kref_put(&mm->ref, _intel_vgpu_mm_release);
+}
 
-extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
-		int mm_type, void *virtual_page_table, int page_table_level,
-		u32 pde_base_index);
-extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
+{
+	intel_vgpu_mm_put(mm);
+}
 
 struct intel_vgpu_guest_page;
 
@@ -196,10 +184,8 @@ struct intel_vgpu_scratch_pt {
 struct intel_vgpu_gtt {
 	struct intel_vgpu_mm *ggtt_mm;
 	unsigned long active_ppgtt_mm_bitmap;
-	struct list_head mm_list_head;
-	DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
-	DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
-	atomic_t n_tracked_guest_page;
+	struct list_head ppgtt_mm_list_head;
+	struct radix_tree_root spt_tree;
 	struct list_head oos_page_list_head;
 	struct list_head post_shadow_list_head;
 	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
@@ -216,32 +202,8 @@ extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
 		int page_table_level, void *root_entry);
 
-struct intel_vgpu_oos_page;
-
-struct intel_vgpu_shadow_page {
-	void *vaddr;
-	struct page *page;
-	int type;
-	struct hlist_node node;
-	unsigned long mfn;
-};
-
-struct intel_vgpu_page_track {
-	struct hlist_node node;
-	bool tracked;
-	unsigned long gfn;
-	int (*handler)(void *, u64, void *, int);
-	void *data;
-};
-
-struct intel_vgpu_guest_page {
-	struct intel_vgpu_page_track track;
-	unsigned long write_cnt;
-	struct intel_vgpu_oos_page *oos_page;
-};
-
 struct intel_vgpu_oos_page {
-	struct intel_vgpu_guest_page *guest_page;
+	struct intel_vgpu_ppgtt_spt *spt;
 	struct list_head list;
 	struct list_head vm_list;
 	int id;
@@ -250,42 +212,33 @@ struct intel_vgpu_oos_page {
 
 #define GTT_ENTRY_NUM_IN_ONE_PAGE 512
 
+/* Represents a vGPU shadow page table. */
 struct intel_vgpu_ppgtt_spt {
-	struct intel_vgpu_shadow_page shadow_page;
-	struct intel_vgpu_guest_page guest_page;
-	int guest_page_type;
 	atomic_t refcount;
 	struct intel_vgpu *vgpu;
-	DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
-	struct list_head post_shadow_list;
-};
 
-int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
-		struct intel_vgpu_page_track *t,
-		unsigned long gfn,
-		int (*handler)(void *gp, u64, void *, int),
-		void *data);
+	struct {
+		intel_gvt_gtt_type_t type;
+		void *vaddr;
+		struct page *page;
+		unsigned long mfn;
+	} shadow_page;
 
-void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
-		struct intel_vgpu_page_track *t);
+	struct {
+		intel_gvt_gtt_type_t type;
+		unsigned long gfn;
+		unsigned long write_cnt;
+		struct intel_vgpu_oos_page *oos_page;
+	} guest_page;
 
-struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
-		struct intel_vgpu *vgpu, unsigned long gfn);
+	DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
+	struct list_head post_shadow_list;
+};
 
 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
 
 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
 
-static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
-{
-	kref_get(&mm->ref);
-}
-
-static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
-{
-	kref_put(&mm->ref, intel_vgpu_destroy_mm);
-}
-
 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
 
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
@@ -294,21 +247,17 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
 		unsigned long gma);
 
 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level, void *root_entry);
+		u64 pdps[]);
 
-int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level);
+struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
 
-int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level);
+int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
 
-int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes);
 
-int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
+int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes);
 
-int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
-				     void *p_data, unsigned int bytes);
-
 #endif /* _GVT_GTT_H_ */
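
The mm lifetime model in this header is a plain kref: intel_vgpu_mm_get/put wrap a reference count, the release callback does the teardown, and destroy is just a final put. A self-contained sketch of the pattern, with the kref replaced by a bare counter for illustration:

#include <stdio.h>
#include <stdlib.h>

struct mm_obj {
	int refcount;              /* stands in for struct kref */
};

static void mm_release(struct mm_obj *mm)
{
	printf("releasing mm\n");  /* the _intel_vgpu_mm_release() analogue */
	free(mm);
}

static void mm_get(struct mm_obj *mm) { mm->refcount++; }

static void mm_put(struct mm_obj *mm)
{
	if (--mm->refcount == 0)   /* kref_put() invokes release at zero */
		mm_release(mm);
}

int main(void)
{
	struct mm_obj *mm = malloc(sizeof(*mm));

	if (!mm)
		return 1;
	mm->refcount = 1;          /* kref_init() */
	mm_get(mm);                /* a second user takes a reference */
	mm_put(mm);                /* ...and drops it */
	mm_put(mm);                /* the final put frees the object */
	return 0;
}

Defining intel_vgpu_destroy_mm() as a thin wrapper over the final put keeps teardown safe even while other paths still hold references.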

+ 1 - 1
drivers/gpu/drm/i915/gvt/gvt.c

@@ -183,7 +183,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.get_gvt_attrs = intel_get_gvt_attrs,
 	.vgpu_query_plane = intel_vgpu_query_plane,
 	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
-	.write_protect_handler = intel_vgpu_write_protect_handler,
+	.write_protect_handler = intel_vgpu_page_track_handler,
 };
 
 /**

+ 16 - 5
drivers/gpu/drm/i915/gvt/gvt.h

@@ -48,6 +48,7 @@
 #include "cmd_parser.h"
 #include "fb_decoder.h"
 #include "dmabuf.h"
+#include "page_track.h"
 
 #define GVT_MAX_VGPU 8
 
@@ -131,11 +132,9 @@ struct intel_vgpu_opregion {
 
 #define vgpu_opregion(vgpu) (&(vgpu->opregion))
 
-#define INTEL_GVT_MAX_PORT 5
-
 struct intel_vgpu_display {
 	struct intel_vgpu_i2c_edid i2c_edid;
-	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
+	struct intel_vgpu_port ports[I915_MAX_PORTS];
 	struct intel_vgpu_sbi sbi;
 };
 
@@ -190,6 +189,7 @@ struct intel_vgpu {
 	struct intel_vgpu_opregion opregion;
 	struct intel_vgpu_display display;
 	struct intel_vgpu_submission submission;
+	struct radix_tree_root page_track_tree;
 	u32 hws_pga[I915_NUM_ENGINES];
 
 	struct dentry *debugfs;
@@ -201,8 +201,16 @@ struct intel_vgpu {
 		int num_regions;
 		struct eventfd_ctx *intx_trigger;
 		struct eventfd_ctx *msi_trigger;
-		struct rb_root cache;
+
+		/*
+		 * Two caches are used to avoid mapping duplicated pages (e.g.
+		 * scratch pages). This helps to reduce DMA setup overhead.
+		 */
+		struct rb_root gfn_cache;
+		struct rb_root dma_addr_cache;
+		unsigned long nr_cache_entries;
 		struct mutex cache_lock;
+
 		struct notifier_block iommu_notifier;
 		struct notifier_block group_notifier;
 		struct kvm *kvm;
@@ -308,7 +316,10 @@ struct intel_gvt {
 	wait_queue_head_t service_thread_wq;
 	unsigned long service_request;
 
-	struct engine_mmio *engine_mmio_list;
+	struct {
+		struct engine_mmio *mmio;
+		int ctx_mmio_count[I915_NUM_ENGINES];
+	} engine_mmio_list;
 
 	struct dentry *debugfs_root;
 };
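
The two rb-trees added to the vdev struct index the same gvt_dma entries under two keys, so each entry carries two intrusive tree nodes and either lookup direction stays logarithmic. A sketch of the dual-index node layout and the container_of-style recovery it relies on (tree plumbing omitted; field names follow the patch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rb_node_stub { struct rb_node_stub *left, *right; };

/* One cache entry, linked into two search trees under two keys. */
struct gvt_dma_entry {
	struct rb_node_stub gfn_node;       /* keyed by guest frame number */
	struct rb_node_stub dma_addr_node;  /* keyed by mapped DMA address */
	uint64_t gfn;
	uint64_t dma_addr;
	int ref;                            /* shared by both indexes */
};

int main(void)
{
	/* container_of-style recovery: subtract the member offset to get
	 * back to the entry from whichever node a tree walk returned. */
	struct gvt_dma_entry e = { .gfn = 42, .dma_addr = 0x1000 };
	char *node = (char *)&e.dma_addr_node;
	struct gvt_dma_entry *back = (struct gvt_dma_entry *)
		(node - offsetof(struct gvt_dma_entry, dma_addr_node));

	printf("recovered gfn %llu\n", (unsigned long long)back->gfn);
	return 0;
}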

+ 20 - 18
drivers/gpu/drm/i915/gvt/handlers.c

@@ -188,7 +188,9 @@ void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
 static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
 		unsigned int fence_num, void *p_data, unsigned int bytes)
 {
-	if (fence_num >= vgpu_fence_sz(vgpu)) {
+	unsigned int max_fence = vgpu_fence_sz(vgpu);
+
+	if (fence_num >= max_fence) {
 
 		/* When guest access oob fence regs without access
 		 * pv_info first, we treat guest not supporting GVT,
@@ -201,7 +203,7 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
 		if (!vgpu->mmio.disable_warn_untrack) {
 			gvt_vgpu_err("found oob fence register access\n");
 			gvt_vgpu_err("total fence %d, access fence %d\n",
-					vgpu_fence_sz(vgpu), fence_num);
+				     max_fence, fence_num);
 		}
 		memset(p_data, 0, bytes);
 		return -EINVAL;
@@ -320,7 +322,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
 
 	/* sw will wait for the device to ack the reset request */
-	 vgpu_vreg(vgpu, offset) = 0;
+	vgpu_vreg(vgpu, offset) = 0;
 
 	return 0;
 }
@@ -1139,21 +1141,21 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 
 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 {
-	int ret = 0;
+	intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+	struct intel_vgpu_mm *mm;
+	u64 *pdps;
+
+	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 
 	switch (notification) {
 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
-		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
-		break;
-	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
-		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
-		break;
+		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
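+		/* fall through */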
 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
-		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
-		break;
+		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
+		return PTR_ERR_OR_ZERO(mm);
+	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
-		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
-		break;
+		return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
 	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
 	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
 	case 1:	/* Remove this in guest driver. */
@@ -1161,7 +1163,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 	default:
 		gvt_vgpu_err("Invalid PV notification %d\n", notification);
 	}
-	return ret;
+	return 0;
 }
 
 static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
@@ -1389,8 +1391,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 
 	if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
-		gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n",
-			      vgpu->id, offset, value);
+		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
+			      offset, value);
 		return -EINVAL;
 	}
 	/*
@@ -1399,8 +1401,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
 	 * support BDW, SKL or other platforms with same HWSP registers.
 	 */
 	if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
-		gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
-			     vgpu->id, offset);
+		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
+			     offset);
 		return -EINVAL;
 	}
 	vgpu->hws_pga[ring_id] = value;
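
handle_g2v_notification now collapses the pointer-or-error result of intel_vgpu_get_ppgtt_mm() into an int via PTR_ERR_OR_ZERO(). A user-space sketch of that kernel convention, with the errno-in-pointer encoding simplified:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* the top 4095 addresses are reserved for encoded errnos */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
static inline int PTR_ERR_OR_ZERO(const void *p)
{
	return IS_ERR(p) ? (int)PTR_ERR(p) : 0;
}

static void *get_mm(int fail)
{
	static int dummy;
	return fail ? ERR_PTR(-ENOMEM) : &dummy;
}

int main(void)
{
	printf("ok path: %d\n", PTR_ERR_OR_ZERO(get_mm(0)));   /* 0   */
	printf("err path: %d\n", PTR_ERR_OR_ZERO(get_mm(1)));  /* -12 */
	return 0;
}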

+ 7 - 2
drivers/gpu/drm/i915/gvt/hypercall.h

@@ -44,13 +44,18 @@ struct intel_gvt_mpt {
 	void (*detach_vgpu)(unsigned long handle);
 	int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
 	unsigned long (*from_virt_to_mfn)(void *p);
-	int (*set_wp_page)(unsigned long handle, u64 gfn);
-	int (*unset_wp_page)(unsigned long handle, u64 gfn);
+	int (*enable_page_track)(unsigned long handle, u64 gfn);
+	int (*disable_page_track)(unsigned long handle, u64 gfn);
 	int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
 			unsigned long len);
 	int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
 			 unsigned long len);
 	unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
+
+	int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
+				  dma_addr_t *dma_addr);
+	void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+
 	int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
 			      unsigned long mfn, unsigned int nr, bool map);
 	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,

+ 189 - 124
drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -41,6 +41,7 @@
 #include <linux/kvm_host.h>
 #include <linux/vfio.h>
 #include <linux/mdev.h>
+#include <linux/debugfs.h>
 
 #include "i915_drv.h"
 #include "gvt.h"
@@ -84,12 +85,16 @@ struct kvmgt_guest_info {
 #define NR_BKT (1 << 18)
 	struct hlist_head ptable[NR_BKT];
 #undef NR_BKT
+	struct dentry *debugfs_cache_entries;
 };
 
 struct gvt_dma {
-	struct rb_node node;
+	struct intel_vgpu *vgpu;
+	struct rb_node gfn_node;
+	struct rb_node dma_addr_node;
 	gfn_t gfn;
-	unsigned long iova;
+	dma_addr_t dma_addr;
+	struct kref ref;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -101,165 +106,167 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 
-static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
-		unsigned long *iova)
+static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
+		dma_addr_t *dma_addr)
 {
-	struct page *page;
 	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-	dma_addr_t daddr;
+	struct page *page;
+	unsigned long pfn;
+	int ret;
 
-	if (unlikely(!pfn_valid(pfn)))
-		return -EFAULT;
+	/* Pin the page first. */
+	ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
+			     IOMMU_READ | IOMMU_WRITE, &pfn);
+	if (ret != 1) {
+		gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
+			     gfn, ret);
+		return -EINVAL;
+	}
 
+	/* Setup DMA mapping. */
 	page = pfn_to_page(pfn);
-	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
-			PCI_DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(dev, daddr))
+	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
+				 PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, *dma_addr)) {
+		gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
+		vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
 		return -ENOMEM;
+	}
 
-	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
 	return 0;
 }
 
-static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
+		dma_addr_t dma_addr)
 {
 	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-	dma_addr_t daddr;
+	int ret;
 
-	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
-	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
+	WARN_ON(ret != 1);
 }
 
-static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
+		dma_addr_t dma_addr)
 {
-	struct rb_node *node = vgpu->vdev.cache.rb_node;
-	struct gvt_dma *ret = NULL;
+	struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+	struct gvt_dma *itr;
 
 	while (node) {
-		struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);
+		itr = rb_entry(node, struct gvt_dma, dma_addr_node);
 
-		if (gfn < itr->gfn)
+		if (dma_addr < itr->dma_addr)
 			node = node->rb_left;
-		else if (gfn > itr->gfn)
+		else if (dma_addr > itr->dma_addr)
 			node = node->rb_right;
-		else {
-			ret = itr;
-			goto out;
-		}
+		else
+			return itr;
 	}
-
-out:
-	return ret;
+	return NULL;
 }
 
-static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
 {
-	struct gvt_dma *entry;
-	unsigned long iova;
-
-	mutex_lock(&vgpu->vdev.cache_lock);
+	struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
+	struct gvt_dma *itr;
 
-	entry = __gvt_cache_find(vgpu, gfn);
-	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
+	while (node) {
+		itr = rb_entry(node, struct gvt_dma, gfn_node);
 
-	mutex_unlock(&vgpu->vdev.cache_lock);
-	return iova;
+		if (gfn < itr->gfn)
+			node = node->rb_left;
+		else if (gfn > itr->gfn)
+			node = node->rb_right;
+		else
+			return itr;
+	}
+	return NULL;
 }
 
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
-		unsigned long iova)
+static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+		dma_addr_t dma_addr)
 {
 	struct gvt_dma *new, *itr;
-	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
+	struct rb_node **link, *parent = NULL;
 
 	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
 	if (!new)
 		return;
 
+	new->vgpu = vgpu;
 	new->gfn = gfn;
-	new->iova = iova;
+	new->dma_addr = dma_addr;
+	kref_init(&new->ref);
 
-	mutex_lock(&vgpu->vdev.cache_lock);
+	/* gfn_cache maps gfn to struct gvt_dma. */
+	link = &vgpu->vdev.gfn_cache.rb_node;
 	while (*link) {
 		parent = *link;
-		itr = rb_entry(parent, struct gvt_dma, node);
+		itr = rb_entry(parent, struct gvt_dma, gfn_node);
 
-		if (gfn == itr->gfn)
-			goto out;
-		else if (gfn < itr->gfn)
+		if (gfn < itr->gfn)
 			link = &parent->rb_left;
 		else
 			link = &parent->rb_right;
 	}
+	rb_link_node(&new->gfn_node, parent, link);
+	rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
 
-	rb_link_node(&new->node, parent, link);
-	rb_insert_color(&new->node, &vgpu->vdev.cache);
-	mutex_unlock(&vgpu->vdev.cache_lock);
-	return;
+	/* dma_addr_cache maps dma addr to struct gvt_dma. */
+	parent = NULL;
+	link = &vgpu->vdev.dma_addr_cache.rb_node;
+	while (*link) {
+		parent = *link;
+		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
 
-out:
-	mutex_unlock(&vgpu->vdev.cache_lock);
-	kfree(new);
+		if (dma_addr < itr->dma_addr)
+			link = &parent->rb_left;
+		else
+			link = &parent->rb_right;
+	}
+	rb_link_node(&new->dma_addr_node, parent, link);
+	rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+
+	vgpu->vdev.nr_cache_entries++;
 }
 
 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
 				struct gvt_dma *entry)
 {
-	rb_erase(&entry->node, &vgpu->vdev.cache);
+	rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
+	rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
 	kfree(entry);
-}
-
-static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
-{
-	struct device *dev = mdev_dev(vgpu->vdev.mdev);
-	struct gvt_dma *this;
-	unsigned long g1;
-	int rc;
-
-	mutex_lock(&vgpu->vdev.cache_lock);
-	this  = __gvt_cache_find(vgpu, gfn);
-	if (!this) {
-		mutex_unlock(&vgpu->vdev.cache_lock);
-		return;
-	}
-
-	g1 = gfn;
-	gvt_dma_unmap_iova(vgpu, this->iova);
-	rc = vfio_unpin_pages(dev, &g1, 1);
-	WARN_ON(rc != 1);
-	__gvt_cache_remove_entry(vgpu, this);
-	mutex_unlock(&vgpu->vdev.cache_lock);
-}
-
-static void gvt_cache_init(struct intel_vgpu *vgpu)
-{
-	vgpu->vdev.cache = RB_ROOT;
-	mutex_init(&vgpu->vdev.cache_lock);
+	vgpu->vdev.nr_cache_entries--;
 }
 
 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 {
 	struct gvt_dma *dma;
 	struct rb_node *node = NULL;
-	struct device *dev = mdev_dev(vgpu->vdev.mdev);
-	unsigned long gfn;
 
 	for (;;) {
 		mutex_lock(&vgpu->vdev.cache_lock);
-		node = rb_first(&vgpu->vdev.cache);
+		node = rb_first(&vgpu->vdev.gfn_cache);
 		if (!node) {
 			mutex_unlock(&vgpu->vdev.cache_lock);
 			break;
 		}
-		dma = rb_entry(node, struct gvt_dma, node);
-		gvt_dma_unmap_iova(vgpu, dma->iova);
-		gfn = dma->gfn;
+		dma = rb_entry(node, struct gvt_dma, gfn_node);
+		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
 		__gvt_cache_remove_entry(vgpu, dma);
 		mutex_unlock(&vgpu->vdev.cache_lock);
-		vfio_unpin_pages(dev, &gfn, 1);
 	}
 }
 
+static void gvt_cache_init(struct intel_vgpu *vgpu)
+{
+	vgpu->vdev.gfn_cache = RB_ROOT;
+	vgpu->vdev.dma_addr_cache = RB_ROOT;
+	vgpu->vdev.nr_cache_entries = 0;
+	mutex_init(&vgpu->vdev.cache_lock);
+}
+
 static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
 {
 	hash_init(info->ptable);
@@ -452,7 +459,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
 	if (IS_ERR_OR_NULL(vgpu)) {
 		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
-		gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
+		gvt_err("failed to create intel vgpu: %d\n", ret);
 		goto out;
 	}
 
@@ -489,13 +496,22 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 
 	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
 		struct vfio_iommu_type1_dma_unmap *unmap = data;
-		unsigned long gfn, end_gfn;
+		struct gvt_dma *entry;
+		unsigned long iov_pfn, end_iov_pfn;
 
-		gfn = unmap->iova >> PAGE_SHIFT;
-		end_gfn = gfn + unmap->size / PAGE_SIZE;
+		iov_pfn = unmap->iova >> PAGE_SHIFT;
+		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
 
-		while (gfn < end_gfn)
-			gvt_cache_remove(vgpu, gfn++);
+		mutex_lock(&vgpu->vdev.cache_lock);
+		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
+			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
+			if (!entry)
+				continue;
+
+			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
+			__gvt_cache_remove_entry(vgpu, entry);
+		}
+		mutex_unlock(&vgpu->vdev.cache_lock);
 	}
 
 	return NOTIFY_OK;
@@ -1321,7 +1337,7 @@ static void kvmgt_host_exit(struct device *dev, void *gvt)
 	mdev_unregister_device(dev);
 }
 
-static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
+static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
 {
 	struct kvmgt_guest_info *info;
 	struct kvm *kvm;
@@ -1355,7 +1371,7 @@ out:
 	return 0;
 }
 
-static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
+static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
 {
 	struct kvmgt_guest_info *info;
 	struct kvm *kvm;
@@ -1483,11 +1499,20 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
 	kvm_page_track_register_notifier(kvm, &info->track_node);
 
+	info->debugfs_cache_entries = debugfs_create_ulong(
+						"kvmgt_nr_cache_entries",
+						0444, vgpu->debugfs,
+						&vgpu->vdev.nr_cache_entries);
+	if (!info->debugfs_cache_entries)
+		gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");
+
 	return 0;
 }
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
+	debugfs_remove(info->debugfs_cache_entries);
+
 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
 	kvm_put_kvm(info->kvm);
 	kvmgt_protect_table_destroy(info);
@@ -1527,39 +1552,77 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 
 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 {
-	unsigned long iova, pfn;
 	struct kvmgt_guest_info *info;
-	struct device *dev;
-	struct intel_vgpu *vgpu;
-	int rc;
+	kvm_pfn_t pfn;
 
 	if (!handle_valid(handle))
 		return INTEL_GVT_INVALID_ADDR;
 
 	info = (struct kvmgt_guest_info *)handle;
-	vgpu = info->vgpu;
-	iova = gvt_cache_find(info->vgpu, gfn);
-	if (iova != INTEL_GVT_INVALID_ADDR)
-		return iova;
-
-	pfn = INTEL_GVT_INVALID_ADDR;
-	dev = mdev_dev(info->vgpu->vdev.mdev);
-	rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
-	if (rc != 1) {
-		gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
-			gfn, rc);
-		return INTEL_GVT_INVALID_ADDR;
-	}
-	/* transfer to host iova for GFX to use DMA */
-	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
-	if (rc) {
-		gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
-		vfio_unpin_pages(dev, &gfn, 1);
+
+	pfn = gfn_to_pfn(info->kvm, gfn);
+	if (is_error_noslot_pfn(pfn))
 		return INTEL_GVT_INVALID_ADDR;
+
+	return pfn;
+}
+
+int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+		dma_addr_t *dma_addr)
+{
+	struct kvmgt_guest_info *info;
+	struct intel_vgpu *vgpu;
+	struct gvt_dma *entry;
+	int ret;
+
+	if (!handle_valid(handle))
+		return -EINVAL;
+
+	info = (struct kvmgt_guest_info *)handle;
+	vgpu = info->vgpu;
+
+	mutex_lock(&info->vgpu->vdev.cache_lock);
+
+	entry = __gvt_cache_find_gfn(info->vgpu, gfn);
+	if (!entry) {
+		ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
+		if (ret) {
+			mutex_unlock(&info->vgpu->vdev.cache_lock);
+			return ret;
+		}
+		__gvt_cache_add(info->vgpu, gfn, *dma_addr);
+	} else {
+		kref_get(&entry->ref);
+		*dma_addr = entry->dma_addr;
 	}
 
-	gvt_cache_add(info->vgpu, gfn, iova);
-	return iova;
+	mutex_unlock(&info->vgpu->vdev.cache_lock);
+	return 0;
+}
+
+static void __gvt_dma_release(struct kref *ref)
+{
+	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
+
+	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
+	__gvt_cache_remove_entry(entry->vgpu, entry);
+}
+
+void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+	struct kvmgt_guest_info *info;
+	struct gvt_dma *entry;
+
+	if (!handle_valid(handle))
+		return;
+
+	info = (struct kvmgt_guest_info *)handle;
+
+	mutex_lock(&info->vgpu->vdev.cache_lock);
+	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+	if (entry)
+		kref_put(&entry->ref, __gvt_dma_release);
+	mutex_unlock(&info->vgpu->vdev.cache_lock);
 }
 
 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
@@ -1629,11 +1692,13 @@ struct intel_gvt_mpt kvmgt_mpt = {
 	.detach_vgpu = kvmgt_detach_vgpu,
 	.inject_msi = kvmgt_inject_msi,
 	.from_virt_to_mfn = kvmgt_virt_to_pfn,
-	.set_wp_page = kvmgt_write_protect_add,
-	.unset_wp_page = kvmgt_write_protect_remove,
+	.enable_page_track = kvmgt_page_track_add,
+	.disable_page_track = kvmgt_page_track_remove,
 	.read_gpa = kvmgt_read_gpa,
 	.write_gpa = kvmgt_write_gpa,
 	.gfn_to_mfn = kvmgt_gfn_to_pfn,
+	.dma_map_guest_page = kvmgt_dma_map_guest_page,
+	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
 	.set_opregion = kvmgt_set_opregion,
 	.get_vfio_device = kvmgt_get_vfio_device,
 	.put_vfio_device = kvmgt_put_vfio_device,
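
gvt_dma_map_page above is a two-step acquire with unwind: pin the guest page through VFIO, then set up the DMA mapping, and undo the pin if the mapping fails. The same shape with stubbed primitives (the stubs stand in for vfio_pin_pages() and dma_map_page(), which need a real device):

#include <stdio.h>

static int pin_page(unsigned long gfn, unsigned long *pfn)
{
	*pfn = gfn + 1000;                     /* stub: pretend the pin worked */
	return 0;
}
static void unpin_page(unsigned long gfn) { (void)gfn; }
static int map_page(unsigned long pfn, unsigned long long *dma)
{
	*dma = (unsigned long long)pfn << 12;  /* stub mapping */
	return 0;
}

static int dma_map_guest_page(unsigned long gfn, unsigned long long *dma_addr)
{
	unsigned long pfn;
	int ret;

	ret = pin_page(gfn, &pfn);        /* step 1: pin */
	if (ret)
		return ret;

	ret = map_page(pfn, dma_addr);    /* step 2: map */
	if (ret) {
		unpin_page(gfn);          /* unwind step 1 on failure */
		return ret;
	}
	return 0;
}

int main(void)
{
	unsigned long long dma;

	if (!dma_map_guest_page(42, &dma))
		printf("gfn 42 -> dma 0x%llx\n", dma);
	return 0;
}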

+ 4 - 5
drivers/gpu/drm/i915/gvt/mmio.c

@@ -76,10 +76,9 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		else
 			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
 					bytes);
-	} else if (reg_is_gtt(gvt, offset) &&
-			vgpu->gtt.ggtt_mm->virtual_page_table) {
+	} else if (reg_is_gtt(gvt, offset)) {
 		offset -= gvt->device_info.gtt_start_offset;
-		pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+		pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
 		if (read)
 			memcpy(p_data, pt, bytes);
 		else
@@ -125,7 +124,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
 			goto err;
 
-		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
+		ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
 				p_data, bytes);
 		if (ret)
 			goto err;
@@ -198,7 +197,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
 			goto err;
 
-		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
+		ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
 				p_data, bytes);
 		if (ret)
 			goto err;

+ 191 - 19
drivers/gpu/drm/i915/gvt/mmio_context.c

@@ -50,6 +50,8 @@
 #define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
 #define VF_GUARDBAND		_MMIO(0x83a4)
 
+#define GEN9_MOCS_SIZE		64
+
 /* Raw offset is appended to each line for convenience. */
 static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
 	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
@@ -151,8 +153,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 
 static struct {
 	bool initialized;
-	u32 control_table[I915_NUM_ENGINES][64];
-	u32 l3cc_table[32];
+	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
+	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
 } gen9_render_mocs;
 
 static void load_render_mocs(struct drm_i915_private *dev_priv)
@@ -169,7 +171,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 
 	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
 		offset.reg = regs[ring_id];
-		for (i = 0; i < 64; i++) {
+		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
 			gen9_render_mocs.control_table[ring_id][i] =
 				I915_READ_FW(offset);
 			offset.reg += 4;
@@ -177,7 +179,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 	}
 
 	offset.reg = 0xb020;
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
 		gen9_render_mocs.l3cc_table[i] =
 			I915_READ_FW(offset);
 		offset.reg += 4;
@@ -185,6 +187,153 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 	gen9_render_mocs.initialized = true;
 }
 
+static int
+restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
+				 struct i915_request *req)
+{
+	u32 *cs;
+	int ret;
+	struct engine_mmio *mmio;
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ring_id = req->engine->id;
+	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
+
+	if (count == 0)
+		return 0;
+
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
+	if (ret)
+		return ret;
+
+	cs = intel_ring_begin(req, count * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(count);
+	for (mmio = gvt->engine_mmio_list.mmio;
+	     i915_mmio_reg_valid(mmio->reg); mmio++) {
+		if (mmio->ring_id != ring_id ||
+		    !mmio->in_context)
+			continue;
+
+		*cs++ = i915_mmio_reg_offset(mmio->reg);
+		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
+				(mmio->mask << 16);
+		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
+			      *(cs-2), *(cs-1), vgpu->id, ring_id);
+	}
+
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
+					struct i915_request *req)
+{
+	unsigned int index;
+	u32 *cs;
+
+	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
+
+	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
+		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
+		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
+		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
+			      *(cs-2), *(cs-1), vgpu->id, req->engine->id);
+
+	}
+
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	return 0;
+}
+
+static int
+restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
+				     struct i915_request *req)
+{
+	unsigned int index;
+	u32 *cs;
+
+	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
+
+	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
+		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
+		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
+			      *(cs-2), *(cs-1), vgpu->id, req->engine->id);
+
+	}
+
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	return 0;
+}
+
+/*
+ * Use LRI commands to initialize the MMIOs that live in the context state
+ * image of an inhibit context: the tracked engine MMIOs, render_mocs and
+ * render_mocs_l3cc.
+ */
+int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
+				       struct i915_request *req)
+{
+	int ret;
+	u32 *cs;
+
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	ret = restore_context_mmio_for_inhibit(vgpu, req);
+	if (ret)
+		goto out;
+
+	/* no MOCS registers in the context except on the render engine */
+	if (req->engine->id != RCS)
+		goto out;
+
+	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
+	if (ret)
+		goto out;
+
+	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
+	if (ret)
+		goto out;
+
+out:
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	return ret;
+}
+
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -251,11 +400,14 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
+	if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+		return;
+
 	if (!pre && !gen9_render_mocs.initialized)
 		load_render_mocs(dev_priv);
 
 	offset.reg = regs[ring_id];
-	for (i = 0; i < 64; i++) {
+	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
 		if (pre)
 			old_v = vgpu_vreg_t(pre, offset);
 		else
@@ -273,7 +425,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 
 	if (ring_id == RCS) {
 		l3_offset.reg = 0xb020;
-		for (i = 0; i < 32; i++) {
+		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
 			if (pre)
 				old_v = vgpu_vreg_t(pre, l3_offset);
 			else
@@ -293,6 +445,16 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 
 #define CTX_CONTEXT_CONTROL_VAL	0x03
 
+bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+{
+	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
+	u32 inhibit_mask =
+		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+
+	return inhibit_mask ==
+		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
+}
+
 /* Switch ring mmio values (context). */
 static void switch_mmio(struct intel_vgpu *pre,
 			struct intel_vgpu *next,
@@ -300,9 +462,6 @@ static void switch_mmio(struct intel_vgpu *pre,
 {
 	struct drm_i915_private *dev_priv;
 	struct intel_vgpu_submission *s;
-	u32 *reg_state, ctx_ctrl;
-	u32 inhibit_mask =
-		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 	struct engine_mmio *mmio;
 	u32 old_v, new_v;
 
@@ -310,10 +469,18 @@ static void switch_mmio(struct intel_vgpu *pre,
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		switch_mocs(pre, next, ring_id);
 
-	for (mmio = dev_priv->gvt->engine_mmio_list;
+	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
 		if (mmio->ring_id != ring_id)
 			continue;
+		/*
+		 * No need to save or restore an mmio which is in the context
+		 * state image on Kabylake; it is initialized by LRI commands
+		 * and saved or restored together with the context.
+		 */
+		if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+			continue;
+
 		// save
 		if (pre) {
 			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
@@ -327,16 +494,13 @@ static void switch_mmio(struct intel_vgpu *pre,
 		// restore
 		if (next) {
 			s = &next->submission;
-			reg_state =
-				s->shadow_ctx->engine[ring_id].lrc_reg_state;
-			ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
 			/*
-			 * if it is an inhibit context, load in_context mmio
-			 * into HW by mmio write. If it is not, skip this mmio
-			 * write.
+			 * No need to restore an mmio which is in the context
+			 * state image unless this is an inhibit context; it
+			 * will be restored with the context itself.
 			 */
 			if (mmio->in_context &&
-			    (ctx_ctrl & inhibit_mask) != inhibit_mask)
+			    !is_inhibit_context(s->shadow_ctx, ring_id))
 				continue;
 
 			if (mmio->mask)
@@ -405,8 +569,16 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
  */
 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
+	struct engine_mmio *mmio;
+
 	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
-		gvt->engine_mmio_list = gen9_engine_mmio_list;
+		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
 	else
-		gvt->engine_mmio_list = gen8_engine_mmio_list;
+		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
+
+	for (mmio = gvt->engine_mmio_list.mmio;
+	     i915_mmio_reg_valid(mmio->reg); mmio++) {
+		if (mmio->in_context)
+			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+	}
 }
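
The three restore_*_for_inhibit helpers all emit the same ring pattern: one MI_LOAD_REGISTER_IMM(n) header, n offset/value pairs, and an MI_NOOP pad, which is why each reserves count * 2 + 2 dwords. A sketch of that batch layout written into a plain array (the opcode encodings are placeholders, not real gen values):

#include <stdint.h>
#include <stdio.h>

#define FAKE_MI_LRI(n)  (0x22000000u | (2 * (n) - 1)) /* placeholder encoding */
#define FAKE_MI_NOOP    0x00000000u

/* Emit an LRI batch: header, n (reg, value) pairs, then a NOOP pad. */
static unsigned emit_lri(uint32_t *cs, const uint32_t *regs,
			 const uint32_t *vals, unsigned n)
{
	unsigned i, w = 0;

	cs[w++] = FAKE_MI_LRI(n);
	for (i = 0; i < n; i++) {
		cs[w++] = regs[i];   /* register offset */
		cs[w++] = vals[i];   /* value to load   */
	}
	cs[w++] = FAKE_MI_NOOP;      /* keep the dword count even */
	return w;                    /* == 2*n + 2, the ring_begin size */
}

int main(void)
{
	uint32_t cs[16];
	uint32_t regs[2] = { 0x229c, 0x2098 };
	uint32_t vals[2] = { 0xffff0001, 0x0 };

	printf("emitted %u dwords\n", emit_lri(cs, regs, vals, 2));
	return 0;
}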

+ 5 - 0
drivers/gpu/drm/i915/gvt/mmio_context.h

@@ -49,4 +49,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 
 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
 
+bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+
+int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
+				       struct i915_request *req);
+
 #endif
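
is_inhibit_context() relies on the masked-register convention used by these MMIOs: the value sits in the low 16 bits and a write-enable mask in the high 16, so _MASKED_BIT_ENABLE(bit) is (bit << 16) | bit and the context is inhibited only when both halves are set. A sketch of the test (the bit position is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(b)  (((b) << 16) | (b)) /* mask half + value half */
#define CTX_RESTORE_INHIBIT   (1u << 0)           /* illustrative bit */

static int is_inhibit(uint32_t ctx_ctrl)
{
	uint32_t m = MASKED_BIT_ENABLE(CTX_RESTORE_INHIBIT);

	return (ctx_ctrl & m) == m;   /* bit enabled *and* mask set */
}

int main(void)
{
	printf("%d %d\n", is_inhibit(MASKED_BIT_ENABLE(1u << 0)),
	       is_inhibit(1u << 0));  /* value set but mask clear -> 0 */
	return 0;
}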

+ 36 - 31
drivers/gpu/drm/i915/gvt/mpt.h

@@ -154,54 +154,31 @@ static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
 }
 
 /**
- * intel_gvt_hypervisor_enable - set a guest page to write-protected
+ * intel_gvt_hypervisor_enable_page_track - track a guest page
  * @vgpu: a vGPU
- * @t: page track data structure
+ * @gfn: the gfn of guest
  *
  * Returns:
  * Zero on success, negative error code if failed.
  */
 static inline int intel_gvt_hypervisor_enable_page_track(
-		struct intel_vgpu *vgpu,
-		struct intel_vgpu_page_track *t)
+		struct intel_vgpu *vgpu, unsigned long gfn)
 {
-	int ret;
-
-	if (t->tracked)
-		return 0;
-
-	ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, t->gfn);
-	if (ret)
-		return ret;
-	t->tracked = true;
-	atomic_inc(&vgpu->gtt.n_tracked_guest_page);
-	return 0;
+	return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
 }
 
 /**
- * intel_gvt_hypervisor_disable_page_track - remove the write-protection of a
- * guest page
+ * intel_gvt_hypervisor_disable_page_track - untrack a guest page
  * @vgpu: a vGPU
- * @t: page track data structure
+ * @gfn: the gfn of guest
  *
  * Returns:
  * Zero on success, negative error code if failed.
  */
 static inline int intel_gvt_hypervisor_disable_page_track(
-		struct intel_vgpu *vgpu,
-		struct intel_vgpu_page_track *t)
+		struct intel_vgpu *vgpu, unsigned long gfn)
 {
-	int ret;
-
-	if (!t->tracked)
-		return 0;
-
-	ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, t->gfn);
-	if (ret)
-		return ret;
-	t->tracked = false;
-	atomic_dec(&vgpu->gtt.n_tracked_guest_page);
-	return 0;
+	return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
 }
 
 /**
@@ -250,6 +227,34 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
 	return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
 }
 
+/**
+ * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
+ * @vgpu: a vGPU
+ * @gfn: guest page frame number
+ * @dma_addr: retrieve allocated dma addr
+ *
+ * Returns:
+ * 0 on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_dma_map_guest_page(
+		struct intel_vgpu *vgpu, unsigned long gfn,
+		dma_addr_t *dma_addr)
+{
+	return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
+						      dma_addr);
+}
+
+/**
+ * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
+ * @vgpu: a vGPU
+ * @dma_addr: the mapped dma addr
+ */
+static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
+		struct intel_vgpu *vgpu, dma_addr_t dma_addr)
+{
+	intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
+}
+
 /**
  * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
  * @vgpu: a vGPU

+ 184 - 0
drivers/gpu/drm/i915/gvt/page_track.c

@@ -0,0 +1,184 @@
+/*
+ * Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "i915_drv.h"
+#include "gvt.h"
+
+/**
+ * intel_vgpu_find_page_track - find the page track record of a guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * A pointer to struct intel_vgpu_page_track if found, otherwise NULL.
+ */
+struct intel_vgpu_page_track *intel_vgpu_find_page_track(
+		struct intel_vgpu *vgpu, unsigned long gfn)
+{
+	return radix_tree_lookup(&vgpu->page_track_tree, gfn);
+}
+
+/**
+ * intel_vgpu_register_page_track - register a guest page to be tracked
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,
+		gvt_page_track_handler_t handler, void *priv)
+{
+	struct intel_vgpu_page_track *track;
+	int ret;
+
+	track = intel_vgpu_find_page_track(vgpu, gfn);
+	if (track)
+		return -EEXIST;
+
+	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	if (!track)
+		return -ENOMEM;
+
+	track->handler = handler;
+	track->priv_data = priv;
+
+	ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
+	if (ret) {
+		kfree(track);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * intel_vgpu_unregister_page_track - unregister the tracked guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ */
+void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
+		unsigned long gfn)
+{
+	struct intel_vgpu_page_track *track;
+
+	track = radix_tree_delete(&vgpu->page_track_tree, gfn);
+	if (track) {
+		if (track->tracked)
+			intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
+		kfree(track);
+	}
+}
+
+/**
+ * intel_vgpu_enable_page_track - set write-protection on guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
+{
+	struct intel_vgpu_page_track *track;
+	int ret;
+
+	track = intel_vgpu_find_page_track(vgpu, gfn);
+	if (!track)
+		return -ENXIO;
+
+	if (track->tracked)
+		return 0;
+
+	ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
+	if (ret)
+		return ret;
+	track->tracked = true;
+	return 0;
+}
+
+/**
+ * intel_vgpu_disable_page_track - cancel write-protection on guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
+{
+	struct intel_vgpu_page_track *track;
+	int ret;
+
+	track = intel_vgpu_find_page_track(vgpu, gfn);
+	if (!track)
+		return -ENXIO;
+
+	if (!track->tracked)
+		return 0;
+
+	ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
+	if (ret)
+		return ret;
+	track->tracked = false;
+	return 0;
+}
+
+/**
+ * intel_vgpu_page_track_handler - called when a write-protected page is written
+ * @vgpu: a vGPU
+ * @gpa: the gpa of this write
+ * @data: the data being written
+ * @bytes: the length of this write
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
+		void *data, unsigned int bytes)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_vgpu_page_track *page_track;
+	int ret = 0;
+
+	mutex_lock(&gvt->lock);
+
+	page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
+	if (!page_track) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (unlikely(vgpu->failsafe)) {
+		/* Remove write protection to prevent future traps. */
+		intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
+	} else {
+		ret = page_track->handler(page_track, gpa, data, bytes);
+		if (ret)
+			gvt_err("guest page write error, gpa %llx\n", gpa);
+	}
+
+out:
+	mutex_unlock(&gvt->lock);
+	return ret;
+}

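[Editor's note] Putting the new API together: a client registers a handler for a gfn, enables write-protection, and the trap path in intel_vgpu_page_track_handler() above dispatches to that handler. A hedged usage sketch; the handler and caller below are hypothetical, only the intel_vgpu_* calls come from this file:

/* Hypothetical handler: emulate the trapped write using priv_data. */
static int sample_wp_handler(struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{
	/* page_track->priv_data identifies the structure being shadowed */
	return 0;
}

static int sample_track_gfn(struct intel_vgpu *vgpu, unsigned long gfn,
		void *priv)
{
	int ret;

	ret = intel_vgpu_register_page_track(vgpu, gfn, sample_wp_handler, priv);
	if (ret)
		return ret;

	ret = intel_vgpu_enable_page_track(vgpu, gfn);
	if (ret)
		intel_vgpu_unregister_page_track(vgpu, gfn);
	return ret;
}
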
+ 56 - 0
drivers/gpu/drm/i915/gvt/page_track.h

@@ -0,0 +1,56 @@
+/*
+ * Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _GVT_PAGE_TRACK_H_
+#define _GVT_PAGE_TRACK_H_
+
+struct intel_vgpu_page_track;
+
+typedef int (*gvt_page_track_handler_t)(
+			struct intel_vgpu_page_track *page_track,
+			u64 gpa, void *data, int bytes);
+
+/* Track record for a write-protected guest page. */
+struct intel_vgpu_page_track {
+	gvt_page_track_handler_t handler;
+	bool tracked;
+	void *priv_data;
+};
+
+struct intel_vgpu_page_track *intel_vgpu_find_page_track(
+		struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_register_page_track(struct intel_vgpu *vgpu,
+		unsigned long gfn, gvt_page_track_handler_t handler,
+		void *priv);
+void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
+		unsigned long gfn);
+
+int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
+int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
+		void *data, unsigned int bytes);
+
+#endif

+ 2 - 3
drivers/gpu/drm/i915/gvt/sched_policy.c

@@ -103,9 +103,8 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
 
 		list_for_each(pos, &sched_data->lru_runq_head) {
 			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
-			fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
-						vgpu_data->sched_ctl.weight /
-						total_weight;
+			fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
+						     total_weight) * vgpu_data->sched_ctl.weight;
 
 			vgpu_data->allocated_ts = fair_timeslice;
 			vgpu_data->left_ts = vgpu_data->allocated_ts;

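[Editor's note] The reworked math divides the balance period by total_weight first, via ktime_divns() (the helper for dividing a ktime by a 64-bit integer), and then scales by the vGPU's weight. Assuming a 100 ms balance period (GVT_TS_BALANCE_PERIOD_MS is not visible in this hunk), two vGPUs with weights 4 and 12 out of a total of 16 would get:

	fair_timeslice = (100 ms / 16) *  4 = 25 ms
	fair_timeslice = (100 ms / 16) * 12 = 75 ms

Dividing first truncates at most total_weight - 1 ns per slice, which is negligible against millisecond-scale timeslices.
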
+ 22 - 22
drivers/gpu/drm/i915/gvt/scheduler.c

@@ -113,7 +113,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 #undef COPY_REG
 
 	set_context_pdp_root_pointer(shadow_ring_context,
-				     workload->shadow_mm->shadow_page_table);
+				     (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
 
 	intel_gvt_hypervisor_read_gpa(vgpu,
 			workload->ring_context_gpa +
@@ -225,6 +225,11 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	struct intel_vgpu *vgpu = workload->vgpu;
 	void *shadow_ring_buffer_va;
 	u32 *cs;
+	struct i915_request *req = workload->req;
+
+	if (IS_KABYLAKE(req->i915) &&
+	    is_inhibit_context(req->ctx, req->engine->id))
+		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/* allocate shadow ring buffer */
 	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
@@ -1132,7 +1137,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
 	if (workload->shadow_mm)
-		intel_gvt_mm_unreference(workload->shadow_mm);
+		intel_vgpu_mm_put(workload->shadow_mm);
 
 	kmem_cache_free(s->workloads, workload);
 }
@@ -1181,32 +1186,27 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
 	struct intel_vgpu_mm *mm;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	int page_table_level;
-	u32 pdp[8];
-
-	if (desc->addressing_mode == 1) { /* legacy 32-bit */
-		page_table_level = 3;
-	} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
-		page_table_level = 4;
-	} else {
+	intel_gvt_gtt_type_t root_entry_type;
+	u64 pdps[GVT_RING_CTX_NR_PDPS];
+
+	switch (desc->addressing_mode) {
+	case 1: /* legacy 32-bit */
+		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+		break;
+	case 3: /* legacy 64-bit */
+		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+		break;
+	default:
 		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
 		return -EINVAL;
 	}
 
-	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
 
-	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
-	if (mm) {
-		intel_gvt_mm_reference(mm);
-	} else {
+	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
+	if (IS_ERR(mm))
+		return PTR_ERR(mm);
 
-		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
-				pdp, page_table_level, 0);
-		if (IS_ERR(mm)) {
-			gvt_vgpu_err("fail to create mm object.\n");
-			return PTR_ERR(mm);
-		}
-	}
 	workload->shadow_mm = mm;
 	return 0;
 }

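[Editor's note] prepare_mm() now delegates the old find-or-create dance to intel_vgpu_get_ppgtt_mm(), whose body lives in the gtt.c part of this series. A sketch of the assumed semantics; the helper names are inferred from the rest of this diff (intel_vgpu_mm_put appears above, its get/create counterparts are assumed):

/* Sketch only -- the real implementation is in gvt/gtt.c. */
struct intel_vgpu_mm *get_ppgtt_mm_sketch(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm)
		intel_vgpu_mm_get(mm);	/* reuse: bump the refcount */
	else
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);

	return mm;	/* ERR_PTR on creation failure */
}
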
+ 5 - 5
drivers/gpu/drm/i915/gvt/trace.h

@@ -113,10 +113,10 @@ TRACE_EVENT(gma_index,
 );
 
 TRACE_EVENT(gma_translate,
-	TP_PROTO(int id, char *type, int ring_id, int pt_level,
+	TP_PROTO(int id, char *type, int ring_id, int root_entry_type,
 		unsigned long gma, unsigned long gpa),
 
-	TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+	TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa),
 
 	TP_STRUCT__entry(
 		__array(char, buf, MAX_BUF_LEN)
@@ -124,8 +124,8 @@ TRACE_EVENT(gma_translate,
 
 	TP_fast_assign(
 		snprintf(__entry->buf, MAX_BUF_LEN,
-			"VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
-				id, type, ring_id, pt_level, gma, gpa);
+			"VM%d %s ring %d root_entry_type %d gma 0x%lx -> gpa 0x%lx\n",
+			id, type, ring_id, root_entry_type, gma, gpa);
 	),
 
 	TP_printk("%s", __entry->buf)
@@ -168,7 +168,7 @@ TRACE_EVENT(spt_change,
 	TP_printk("%s", __entry->buf)
 );
 
-TRACE_EVENT(gpt_change,
+TRACE_EVENT(spt_guest_change,
 	TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
 		unsigned long index),
 

+ 1 - 0
drivers/gpu/drm/i915/gvt/vgpu.c

@@ -354,6 +354,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	vgpu->gvt = gvt;
 	vgpu->sched_ctl.weight = param->weight;
 	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
 	idr_init(&vgpu->object_idr);
 	intel_vgpu_init_cfg_space(vgpu, param->primary);