@@ -187,12 +187,35 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
+static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
+		unsigned long entry, unsigned long *hpa,
+		enum dma_data_direction *direction)
+{
+	long ret;
+
+	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+			(*direction == DMA_BIDIRECTIONAL))) {
+		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+
+		/*
+		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
+		 * calling this so we still get here a valid UA.
+		 */
+		if (pua && *pua)
+			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
+	}
+
+	return ret;
+}
+
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+		unsigned long entry)
 {
 	unsigned long hpa = 0;
 	enum dma_data_direction dir = DMA_NONE;
 
-	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+	iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 }
 
 static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -224,7 +247,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 	unsigned long hpa = 0;
 	long ret;
 
-	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
+	if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
 		/*
 		 * real mode xchg can fail if struct page crosses
 		 * a page boundary
@@ -236,7 +259,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 
 	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
 	if (ret)
-		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+		iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 
 	return ret;
 }
@@ -282,7 +305,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
 		return H_CLOSED;
 
-	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 	if (ret) {
 		mm_iommu_mapped_dec(mem);
 		/*
@@ -371,7 +394,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			return ret;
 
 		WARN_ON_ONCE_RM(1);
-		kvmppc_rm_clear_tce(stit->tbl, entry);
+		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 	}
 
 	kvmppc_tce_put(stt, entry, tce);
@@ -520,7 +543,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 				goto unlock_exit;
 
 			WARN_ON_ONCE_RM(1);
-			kvmppc_rm_clear_tce(stit->tbl, entry);
+			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 		}
 
 		kvmppc_tce_put(stt, entry + i, tce);
@@ -571,7 +594,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 				return ret;
 
 			WARN_ON_ONCE_RM(1);
-			kvmppc_rm_clear_tce(stit->tbl, entry);
+			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 		}
 	}
 