@@ -546,6 +546,24 @@ static int pSeries_lpar_hpte_removebolted(unsigned long ea,
 	return 0;
 }
 
+
+static inline unsigned long compute_slot(real_pte_t pte,
+					 unsigned long vpn,
+					 unsigned long index,
+					 unsigned long shift,
+					 int ssize)
+{
+	unsigned long slot, hash, hidx;
+
+	hash = hpt_hash(vpn, shift, ssize);
+	hidx = __rpte_to_hidx(pte, index);
+	if (hidx & _PTEIDX_SECONDARY)
+		hash = ~hash;
+	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+	slot += hidx & _PTEIDX_GROUP_IX;
+	return slot;
+}
+
 /*
  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
  * lock.
@@ -558,7 +576,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 	unsigned long param[PLPAR_HCALL9_BUFSIZE];
-	unsigned long hash, index, shift, hidx, slot;
+	unsigned long index, shift, slot;
 	real_pte_t pte;
 	int psize, ssize;
 
@@ -572,12 +590,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 		vpn = batch->vpn[i];
 		pte = batch->pte[i];
 		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
-			hash = hpt_hash(vpn, shift, ssize);
-			hidx = __rpte_to_hidx(pte, index);
-			if (hidx & _PTEIDX_SECONDARY)
-				hash = ~hash;
-			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-			slot += hidx & _PTEIDX_GROUP_IX;
+			slot = compute_slot(pte, vpn, index, shift, ssize);
 			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
 				/*
 				 * lpar doesn't use the passed actual page size