@@ -259,7 +259,7 @@ static void write_pte64(struct drm_i915_private *dev_priv,
 	writeq(pte, addr);
 }
 
-static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
+static inline int gtt_get_entry64(void *pt,
 		struct intel_gvt_gtt_entry *e,
 		unsigned long index, bool hypervisor_access, unsigned long gpa,
 		struct intel_vgpu *vgpu)
@@ -268,22 +268,23 @@ static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
 	int ret;
 
 	if (WARN_ON(info->gtt_entry_size != 8))
-		return e;
+		return -EINVAL;
 
 	if (hypervisor_access) {
 		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
 				(index << info->gtt_entry_size_shift),
 				&e->val64, 8);
-		WARN_ON(ret);
+		if (WARN_ON(ret))
+			return ret;
 	} else if (!pt) {
 		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
 	} else {
 		e->val64 = *((u64 *)pt + index);
 	}
-	return e;
+	return 0;
 }
 
-static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
+static inline int gtt_set_entry64(void *pt,
 		struct intel_gvt_gtt_entry *e,
 		unsigned long index, bool hypervisor_access, unsigned long gpa,
 		struct intel_vgpu *vgpu)
@@ -292,19 +293,20 @@ static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
 	int ret;
 
 	if (WARN_ON(info->gtt_entry_size != 8))
-		return e;
+		return -EINVAL;
 
 	if (hypervisor_access) {
 		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
 				(index << info->gtt_entry_size_shift),
 				&e->val64, 8);
-		WARN_ON(ret);
+		if (WARN_ON(ret))
+			return ret;
 	} else if (!pt) {
 		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
 	} else {
 		*((u64 *)pt + index) = e->val64;
 	}
-	return e;
+	return 0;
 }
 
 #define GTT_HAW 46
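
Note on the two hunks above: with gtt_get_entry64() and gtt_set_entry64() now returning an int instead of echoing the entry pointer, call sites are expected to check the return value rather than assume the read or write succeeded. A minimal caller sketch under that assumption (the wrapper function itself is hypothetical, not part of this patch):

	/* Hypothetical wrapper: shows the intended error propagation only. */
	static int example_read_pte(struct intel_vgpu *vgpu, void *pt,
			struct intel_gvt_gtt_entry *e, unsigned long index)
	{
		int ret;

		/*
		 * Returns -EINVAL for an unexpected entry size, or the
		 * hypervisor read error; 0 means e->val64 is valid.
		 */
		ret = gtt_get_entry64(pt, e, index, false, 0, vgpu);
		if (ret)
			return ret;
		return 0;
	}
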
@@ -445,21 +447,25 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
 /*
  * MM helpers.
  */
-struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
+int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
 		void *page_table, struct intel_gvt_gtt_entry *e,
 		unsigned long index)
 {
 	struct intel_gvt *gvt = mm->vgpu->gvt;
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	int ret;
 
 	e->type = mm->page_table_entry_type;
 
-	ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+	ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+	if (ret)
+		return ret;
+
 	ops->test_pse(e);
-	return e;
+	return 0;
 }
 
-struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
+int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
 		void *page_table, struct intel_gvt_gtt_entry *e,
 		unsigned long index)
 {
@@ -472,7 +478,7 @@ struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
 /*
  * PPGTT shadow page table helpers.
  */
-static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
+static inline int ppgtt_spt_get_entry(
 		struct intel_vgpu_ppgtt_spt *spt,
 		void *page_table, int type,
 		struct intel_gvt_gtt_entry *e, unsigned long index,
@@ -480,20 +486,24 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
 {
 	struct intel_gvt *gvt = spt->vgpu->gvt;
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	int ret;
 
 	e->type = get_entry_type(type);
 
 	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
-		return e;
+		return -EINVAL;
 
-	ops->get_entry(page_table, e, index, guest,
+	ret = ops->get_entry(page_table, e, index, guest,
 			spt->guest_page.gfn << GTT_PAGE_SHIFT,
 			spt->vgpu);
+	if (ret)
+		return ret;
+
 	ops->test_pse(e);
-	return e;
+	return 0;
 }
 
-static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
+static inline int ppgtt_spt_set_entry(
 		struct intel_vgpu_ppgtt_spt *spt,
 		void *page_table, int type,
 		struct intel_gvt_gtt_entry *e, unsigned long index,
@@ -503,7 +513,7 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 
 	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
-		return e;
+		return -EINVAL;
 
 	return ops->set_entry(page_table, e, index, guest,
 			spt->guest_page.gfn << GTT_PAGE_SHIFT,
@@ -792,13 +802,13 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
 
 #define for_each_present_guest_entry(spt, e, i) \
 	for (i = 0; i < pt_entries(spt); i++) \
-		if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
-			ppgtt_get_guest_entry(spt, e, i)))
+		if (!ppgtt_get_guest_entry(spt, e, i) && \
+		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 
 #define for_each_present_shadow_entry(spt, e, i) \
 	for (i = 0; i < pt_entries(spt); i++) \
-		if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
-			ppgtt_get_shadow_entry(spt, e, i)))
+		if (!ppgtt_get_shadow_entry(spt, e, i) && \
+		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 
 static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
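
Note on the macro hunk above: the rewritten iterators depend on the converted accessors. ppgtt_get_guest_entry()/ppgtt_get_shadow_entry() now return 0 on success, so !ppgtt_get_guest_entry(spt, e, i) is true only when the entry was actually read, and the && short-circuits past test_present() on a failed read instead of testing a stale entry. An open-coded equivalent, as a sketch (the loop body is a placeholder supplied at the macro's use site):

	/* Roughly what for_each_present_guest_entry(spt, e, i) expands to. */
	for (i = 0; i < pt_entries(spt); i++) {
		if (ppgtt_get_guest_entry(spt, e, i))
			continue;	/* read failed; skip this index */
		if (!spt->vgpu->gvt->gtt.pte_ops->test_present(e))
			continue;	/* entry not present; skip */
		/* ... body operates on the present entry ... */
	}
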
@@ -979,29 +989,26 @@ fail:
 }
 
 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
-		unsigned long index)
+		struct intel_gvt_gtt_entry *se, unsigned long index)
 {
 	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
 	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
 	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
-	struct intel_gvt_gtt_entry e;
 	int ret;
 
-	ppgtt_get_shadow_entry(spt, &e, index);
-
-	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
+	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
 			 index);
 
-	if (!ops->test_present(&e))
+	if (!ops->test_present(se))
 		return 0;
 
-	if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
+	if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
 		return 0;
 
-	if (gtt_type_is_pt(get_next_pt_type(e.type))) {
+	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
 		struct intel_vgpu_ppgtt_spt *s =
-			ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
+			ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
 		if (!s) {
 			gvt_vgpu_err("fail to find guest page\n");
 			ret = -ENXIO;
@@ -1011,12 +1018,10 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
 		if (ret)
 			goto fail;
 	}
-	ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
-	ppgtt_set_shadow_entry(spt, &e, index);
 	return 0;
 fail:
 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
-			spt, e.val64, e.type);
+			spt, se->val64, se->type);
 	return ret;
 }
 
@@ -1236,22 +1241,37 @@ static int ppgtt_handle_guest_write_page_table(
 {
 	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
 	struct intel_vgpu *vgpu = spt->vgpu;
+	int type = spt->shadow_page.type;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_entry se;
 
 	int ret;
 	int new_present;
 
 	new_present = ops->test_present(we);
 
-	ret = ppgtt_handle_guest_entry_removal(gpt, index);
-	if (ret)
-		goto fail;
+	/*
+	 * Adding the new entry first and then removing the old one, that can
+	 * guarantee the ppgtt table is validated during the window between
+	 * adding and removal.
+	 */
+	ppgtt_get_shadow_entry(spt, &se, index);
 
 	if (new_present) {
 		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
 		if (ret)
 			goto fail;
 	}
+
+	ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
+	if (ret)
+		goto fail;
+
+	if (!new_present) {
+		ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
+		ppgtt_set_shadow_entry(spt, &se, index);
+	}
+
 	return 0;
 fail:
 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
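
Note on the three hunks above: ppgtt_handle_guest_entry_removal() used to re-read the shadow entry at index itself and park the slot on the scratch page. With the new order the add happens first, so re-reading at index would see the freshly installed entry; the caller therefore snapshots the old shadow entry before anything changes, passes it in as *se, and does the scratch write-back itself only when the guest write cleared the entry. That way a GPU concurrently walking the table never observes an empty slot. Condensed flow, as a sketch using the names from these hunks:

	ppgtt_get_shadow_entry(spt, &se, index);	/* 1. snapshot old */

	if (ops->test_present(we)) {
		/* 2. install the new mapping while the old one is live */
		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
		if (ret)
			goto fail;
	}

	/* 3. tear down the old mapping via the snapshot, not the slot */
	ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
	if (ret)
		goto fail;

	if (!ops->test_present(we)) {
		/* 4. guest cleared the entry: point the slot at scratch */
		ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
		ppgtt_set_shadow_entry(spt, &se, index);
	}
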
@@ -1323,7 +1343,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
 	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
-	struct intel_gvt_gtt_entry we;
+	struct intel_gvt_gtt_entry we, se;
 	unsigned long index;
 	int ret;
 
@@ -1339,7 +1359,8 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
 		return ret;
 	} else {
 		if (!test_bit(index, spt->post_shadow_bitmap)) {
-			ret = ppgtt_handle_guest_entry_removal(gpt, index);
+			ppgtt_get_shadow_entry(spt, &se, index);
+			ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
 			if (ret)
 				return ret;
 		}
@@ -1713,8 +1734,10 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 		if (!vgpu_gmadr_is_valid(vgpu, gma))
 			goto err;
 
-		ggtt_get_guest_entry(mm, &e,
-			gma_ops->gma_to_ggtt_pte_index(gma));
+		ret = ggtt_get_guest_entry(mm, &e,
+				gma_ops->gma_to_ggtt_pte_index(gma));
+		if (ret)
+			goto err;
 		gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
 			+ (gma & ~GTT_PAGE_MASK);
 
@@ -1724,7 +1747,9 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 
 	switch (mm->page_table_level) {
 	case 4:
-		ppgtt_get_shadow_root_entry(mm, &e, 0);
+		ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
+		if (ret)
+			goto err;
 		gma_index[0] = gma_ops->gma_to_pml4_index(gma);
 		gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
 		gma_index[2] = gma_ops->gma_to_pde_index(gma);
@@ -1732,15 +1757,19 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 		index = 4;
 		break;
 	case 3:
-		ppgtt_get_shadow_root_entry(mm, &e,
+		ret = ppgtt_get_shadow_root_entry(mm, &e,
 				gma_ops->gma_to_l3_pdp_index(gma));
+		if (ret)
+			goto err;
 		gma_index[0] = gma_ops->gma_to_pde_index(gma);
 		gma_index[1] = gma_ops->gma_to_pte_index(gma);
 		index = 2;
 		break;
 	case 2:
-		ppgtt_get_shadow_root_entry(mm, &e,
+		ret = ppgtt_get_shadow_root_entry(mm, &e,
 				gma_ops->gma_to_pde_index(gma));
+		if (ret)
+			goto err;
 		gma_index[0] = gma_ops->gma_to_pte_index(gma);
 		index = 1;
 		break;
@@ -1755,6 +1784,11 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 				(i == index - 1));
 		if (ret)
 			goto err;
+
+		if (!pte_ops->test_present(&e)) {
+			gvt_dbg_core("GMA 0x%lx is not present\n", gma);
+			goto err;
+		}
 	}
 
 	gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
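
Note on the translation hunks above: previously the per-level walk only failed on a read error, so a successfully read but non-present entry would still be used to compute a bogus GPA. The added check stops the walk at the first level whose entry is cleared. For context, the enclosing loop as a sketch (reconstructed from the context lines here; the helper name ppgtt_get_next_level_entry comes from the surrounding file, not this excerpt):

	for (i = 0; i < index; i++) {
		ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == index - 1));	/* last level? */
		if (ret)
			goto err;

		/* new in this patch: a cleared entry ends the walk early */
		if (!pte_ops->test_present(&e)) {
			gvt_dbg_core("GMA 0x%lx is not present\n", gma);
			goto err;
		}
	}
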
@@ -2329,13 +2363,12 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 /**
  * intel_vgpu_reset_gtt - reset the all GTT related status
  * @vgpu: a vGPU
- * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
  *
  * This function is called from vfio core to reset reset all
  * GTT related status, including GGTT, PPGTT, scratch page.
  *
  */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 {
 	int i;
 
@@ -2347,9 +2380,6 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
 	 */
 	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
 
-	if (!dmlr)
-		return;
-
 	intel_vgpu_reset_ggtt(vgpu);
 
 	/* clear scratch page for security */
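
Note on the last two hunks: with the dmlr flag dropped, intel_vgpu_reset_gtt() always performs the full reset sequence (free the PPGTT mms, reset the GGTT, clear the scratch page), presumably because its only remaining caller is the device-model-level reset path; the early return for GT reset is gone. The matching prototype change would land in gtt.h, as a sketch (that hunk is not part of this excerpt):

	/* gtt.h, after this patch (sketch) */
	void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
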