瀏覽代碼

KVM: PPC: BOOK3S: HV: Use base page size when comparing against slb value

With guests supporting Multiple page size per segment (MPSS),
hpte_page_size returns the actual page size used. Add a new function to
return base page size and use that to compare against the page size
calculated from SLB. Without this patch a hpte lookup can fail since
we are comparing wrong page size in kvmppc_hv_find_lock_hpte.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Aneesh Kumar K.V 11 年之前
父節點
當前提交
341acbb3aa
共有 3 個文件被更改,包括 20 次插入和 8 次刪除
  1. 17 2
      arch/powerpc/include/asm/kvm_book3s_64.h
  2. 1 1
      arch/powerpc/kvm/book3s_64_mmu_hv.c
  3. 2 5
      arch/powerpc/kvm/book3s_hv_rm_mmu.c

+ 17 - 2
arch/powerpc/include/asm/kvm_book3s_64.h

@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	return rb;
 }
 
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+					     bool is_base_size)
 {
+
 	int size, a_psize;
 	/* Look at the 8 bit LP value */
 	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
 				continue;
 
 			a_psize = __hpte_actual_psize(lp, size);
-			if (a_psize != -1)
+			if (a_psize != -1) {
+				if (is_base_size)
+					return 1ul << mmu_psize_defs[size].shift;
 				return 1ul << mmu_psize_defs[a_psize].shift;
+			}
 		}
 
 	}
 	return 0;
 }
 
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 1);
+}
+
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
 	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;

+ 1 - 1
arch/powerpc/kvm/book3s_64_mmu_hv.c

@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 				goto out;
 			}
 			if (!rma_setup && is_vrma_hpte(v)) {
-				unsigned long psize = hpte_page_size(v, r);
+				unsigned long psize = hpte_base_page_size(v, r);
 				unsigned long senc = slb_pgsize_encoding(psize);
 				unsigned long lpcr;
 

+ 2 - 5
arch/powerpc/kvm/book3s_hv_rm_mmu.c

@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			r = hpte[i+1];
 
 			/*
-			 * Check the HPTE again, including large page size
-			 * Since we don't currently allow any MPSS (mixed
-			 * page-size segment) page sizes, it is sufficient
-			 * to check against the actual page size.
+			 * Check the HPTE again, including base page size
 			 */
 			if ((v & valid) && (v & mask) == val &&
-			    hpte_page_size(v, r) == (1ul << pshift))
+			    hpte_base_page_size(v, r) == (1ul << pshift))
 				/* Return with the HPTE still locked */
 				return (hash << 3) + (i >> 1);