@@ -111,8 +111,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
 	if (iommu_tce_check_gpa(stt->page_shift, gpa))
 		return H_PARAMETER;
 
-	if (kvmppc_gpa_to_ua(stt->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-			&ua, NULL))
+	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
 		return H_TOO_HARD;
 
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -182,10 +181,10 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
 }
 EXPORT_SYMBOL_GPL(kvmppc_tce_put);
 
-long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
 		unsigned long *ua, unsigned long **prmap)
 {
-	unsigned long gfn = gpa >> PAGE_SHIFT;
+	unsigned long gfn = tce >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot;
 
 	memslot = search_memslots(kvm_memslots(kvm), gfn);
@@ -193,7 +192,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 		return -EINVAL;
 
 	*ua = __gfn_to_hva_memslot(memslot, gfn) |
-		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	if (prmap)
@@ -202,7 +201,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
+EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
@@ -391,8 +390,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		return ret;
 
 	dir = iommu_tce_direction(tce);
-	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
-			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
+	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
 		return H_PARAMETER;
 
 	entry = ioba >> stt->page_shift;
@@ -494,7 +492,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		 */
 		struct mm_iommu_table_group_mem_t *mem;
 
-		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+		if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
 			return H_TOO_HARD;
 
 		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -510,7 +508,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		 * We do not require memory to be preregistered in this case
 		 * so lock rmap and do __find_linux_pte_or_hugepte().
 		 */
-		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+		if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
 			return H_TOO_HARD;
 
 		rmap = (void *) vmalloc_to_phys(rmap);
@@ -544,9 +542,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
 
 		ua = 0;
-		if (kvmppc_gpa_to_ua(vcpu->kvm,
-				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-				&ua, NULL))
+		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
 			return H_PARAMETER;
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
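
Side note: a minimal stand-alone sketch (userspace C, not kernel code) of why the
callers above no longer need to mask TCE_PCI_READ/TCE_PCI_WRITE themselves. The
permission bits occupy the two low bits of a TCE, so "tce >> PAGE_SHIFT" discards
them when deriving the gfn, and kvmppc_tce_to_ua() strips them from the in-page
offset explicitly. The TCE_PCI_* values below mirror asm/tce.h; the 64K PAGE_SHIFT
is an assumption for illustration only.

#include <stdio.h>

#define PAGE_SHIFT	16UL			/* assumed 64K host pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define TCE_PCI_WRITE	0x2UL			/* as in asm/tce.h */
#define TCE_PCI_READ	0x1UL

int main(void)
{
	/* A TCE with both permission bits set and a non-zero page offset. */
	unsigned long tce = 0x40012340UL | TCE_PCI_READ | TCE_PCI_WRITE;

	/* The R/W bits sit below PAGE_SHIFT, so the shift discards them. */
	unsigned long gfn = tce >> PAGE_SHIFT;

	/*
	 * The offset masks out both the page bits and the permission bits,
	 * matching the *ua computation in kvmppc_tce_to_ua().
	 */
	unsigned long offset = tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE);

	printf("gfn=0x%lx offset=0x%lx\n", gfn, offset);	/* 0x4001, 0x2340 */
	return 0;
}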