|
@@ -15,34 +15,22 @@
|
|
|
#include <linux/mm.h>
|
|
|
#include <asm/machdep.h>
|
|
|
#include <asm/mmu.h>
|
|
|
+
|
|
|
/*
|
|
|
- * index from 0 - 15
|
|
|
+ * Return true, if the entry has a slot value which
|
|
|
+ * the software considers as invalid.
|
|
|
*/
|
|
|
-bool __rpte_sub_valid(real_pte_t rpte, unsigned long index)
|
|
|
+static inline bool hpte_soft_invalid(unsigned long hidx)
|
|
|
{
|
|
|
- unsigned long g_idx;
|
|
|
- unsigned long ptev = pte_val(rpte.pte);
|
|
|
-
|
|
|
- g_idx = (ptev & H_PAGE_COMBO_VALID) >> H_PAGE_F_GIX_SHIFT;
|
|
|
- index = index >> 2;
|
|
|
- if (g_idx & (0x1 << index))
|
|
|
- return true;
|
|
|
- else
|
|
|
- return false;
|
|
|
+ return ((hidx & 0xfUL) == 0xfUL);
|
|
|
}
|
|
|
+
|
|
|
/*
|
|
|
* index from 0 - 15
|
|
|
*/
|
|
|
-static unsigned long mark_subptegroup_valid(unsigned long ptev, unsigned long index)
|
|
|
+bool __rpte_sub_valid(real_pte_t rpte, unsigned long index)
|
|
|
{
|
|
|
- unsigned long g_idx;
|
|
|
-
|
|
|
- if (!(ptev & H_PAGE_COMBO))
|
|
|
- return ptev;
|
|
|
- index = index >> 2;
|
|
|
- g_idx = 0x1 << index;
|
|
|
-
|
|
|
- return ptev | (g_idx << H_PAGE_F_GIX_SHIFT);
|
|
|
+ return !(hpte_soft_invalid(__rpte_to_hidx(rpte, index)));
|
|
|
}
|
|
|
|
|
|
int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
@@ -50,12 +38,11 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
|
int ssize, int subpg_prot)
|
|
|
{
|
|
|
real_pte_t rpte;
|
|
|
- unsigned long *hidxp;
|
|
|
unsigned long hpte_group;
|
|
|
unsigned int subpg_index;
|
|
|
- unsigned long rflags, pa, hidx;
|
|
|
+ unsigned long rflags, pa;
|
|
|
unsigned long old_pte, new_pte, subpg_pte;
|
|
|
- unsigned long vpn, hash, slot;
|
|
|
+ unsigned long vpn, hash, slot, gslot;
|
|
|
unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;
|
|
|
|
|
|
/*
|
|
@@ -126,18 +113,14 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
|
if (__rpte_sub_valid(rpte, subpg_index)) {
|
|
|
int ret;
|
|
|
|
|
|
- hash = hpt_hash(vpn, shift, ssize);
|
|
|
- hidx = __rpte_to_hidx(rpte, subpg_index);
|
|
|
- if (hidx & _PTEIDX_SECONDARY)
|
|
|
- hash = ~hash;
|
|
|
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
|
|
|
- slot += hidx & _PTEIDX_GROUP_IX;
|
|
|
-
|
|
|
- ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
|
|
|
+ gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte,
|
|
|
+ subpg_index);
|
|
|
+ ret = mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn,
|
|
|
MMU_PAGE_4K, MMU_PAGE_4K,
|
|
|
ssize, flags);
|
|
|
+
|
|
|
/*
|
|
|
- *if we failed because typically the HPTE wasn't really here
|
|
|
+ * If we failed because typically the HPTE wasn't really here
|
|
|
* we try an insertion.
|
|
|
*/
|
|
|
if (ret == -1)
|
|
@@ -148,6 +131,14 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
|
}
|
|
|
|
|
|
htab_insert_hpte:
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Initialize all the hidx entries to an invalid value,
|
|
|
+ * the first time the PTE is about to allocate a 4K HPTE.
|
|
|
+ */
|
|
|
+ if (!(old_pte & H_PAGE_COMBO))
|
|
|
+ rpte.hidx = INVALID_RPTE_HIDX;
|
|
|
+
|
|
|
/*
|
|
|
* handle H_PAGE_4K_PFN case
|
|
|
*/
|
|
@@ -172,15 +163,39 @@ repeat:
|
|
|
* Primary is full, try the secondary
|
|
|
*/
|
|
|
if (unlikely(slot == -1)) {
|
|
|
+ bool soft_invalid;
|
|
|
+
|
|
|
hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
|
|
|
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
|
|
|
rflags, HPTE_V_SECONDARY,
|
|
|
MMU_PAGE_4K, MMU_PAGE_4K,
|
|
|
ssize);
|
|
|
- if (slot == -1) {
|
|
|
- if (mftb() & 0x1)
|
|
|
+
|
|
|
+ soft_invalid = hpte_soft_invalid(slot);
|
|
|
+ if (unlikely(soft_invalid)) {
|
|
|
+ /*
|
|
|
+ * We got a valid slot from a hardware point of
|
|
|
+ * view, but we cannot use it: it is the special
|
|
|
+ * value that hpte_soft_invalid() uses to track
|
|
|
+ * invalid slots, so invalidate it.
|
|
|
+ */
|
|
|
+ gslot = slot & _PTEIDX_GROUP_IX;
|
|
|
+ mmu_hash_ops.hpte_invalidate(hpte_group + gslot, vpn,
|
|
|
+ MMU_PAGE_4K, MMU_PAGE_4K,
|
|
|
+ ssize, 0);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (unlikely(slot == -1 || soft_invalid)) {
|
|
|
+ /*
|
|
|
+ * For a soft-invalid slot, ensure that we release a
|
|
|
+ * slot from the primary group, in the hope that we
|
|
|
+ * will acquire that slot next time we try. This
|
|
|
+ * ensures we do not get the same soft-invalid slot.
|
|
|
+ */
|
|
|
+ if (soft_invalid || (mftb() & 0x1))
|
|
|
hpte_group = ((hash & htab_hash_mask) *
|
|
|
HPTES_PER_GROUP) & ~0x7UL;
|
|
|
+
|
|
|
mmu_hash_ops.hpte_remove(hpte_group);
|
|
|
/*
|
|
|
* FIXME!! Should be try the group from which we removed ?
|
|
@@ -198,21 +213,10 @@ repeat:
|
|
|
MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
|
|
|
return -1;
|
|
|
}
|
|
|
- /*
|
|
|
- * Insert slot number & secondary bit in PTE second half,
|
|
|
- * clear H_PAGE_BUSY and set appropriate HPTE slot bit
|
|
|
- * Since we have H_PAGE_BUSY set on ptep, we can be sure
|
|
|
- * nobody is undating hidx.
|
|
|
- */
|
|
|
- hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
|
|
|
- rpte.hidx &= ~(0xfUL << (subpg_index << 2));
|
|
|
- *hidxp = rpte.hidx | (slot << (subpg_index << 2));
|
|
|
- new_pte = mark_subptegroup_valid(new_pte, subpg_index);
|
|
|
- new_pte |= H_PAGE_HASHPTE;
|
|
|
- /*
|
|
|
- * check __real_pte for details on matching smp_rmb()
|
|
|
- */
|
|
|
- smp_wmb();
|
|
|
+
|
|
|
+ new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot);
|
|
|
+ new_pte |= H_PAGE_HASHPTE;
|
|
|
+
|
|
|
*ptep = __pte(new_pte & ~H_PAGE_BUSY);
|
|
|
return 0;
|
|
|
}
|