@@ -300,10 +300,10 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 
 	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
 			&hpa)))
-		return H_HARDWARE;
+		return H_TOO_HARD;
 
 	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
-		return H_CLOSED;
+		return H_TOO_HARD;
 
 	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 	if (ret) {
@@ -501,7 +501,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 
 		rmap = (void *) vmalloc_to_phys(rmap);
 		if (WARN_ON_ONCE_RM(!rmap))
-			return H_HARDWARE;
+			return H_TOO_HARD;
 
 		/*
 		 * Synchronize with the MMU notifier callbacks in