|
@@ -18,6 +18,57 @@
|
|
|
#include <linux/mm.h>
|
|
|
#include <asm/machdep.h>
|
|
|
|
|
|
/*
 * Invalidate the stale hash page table entries backing one huge (PMD)
 * page, before the page is rehashed with a different base page size.
 *
 * @vsid:  virtual segment id of the mapping
 * @addr:  an effective address within the huge page (rounded down below)
 * @pmdp:  huge page PMD whose per-subpage hash slot array is consulted
 * @psize: base page size the old HPTEs were inserted with
 * @ssize: segment size
 *
 * NOTE(review): caller at the 4K-demote path passes psize = MMU_PAGE_64K;
 * the hardcoded MMU_PAGE_16M below is the actual (huge) page size, which
 * presumably matches HPAGE_PMD_SIZE on this platform — confirm.
 */
static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize)
{
	int i, max_hpte_count, valid;
	unsigned long s_addr;
	unsigned char *hpte_slot_array;
	unsigned long hidx, shift, vpn, hash, slot;

	/* Work from the start of the huge page. */
	s_addr = addr & HPAGE_PMD_MASK;
	hpte_slot_array = get_hpte_slot_array(pmdp);
	/*
	 * If we try to do a HUGE PTE update after a withdraw is done,
	 * we will find the slot array NULL. This happens when we do
	 * split_huge_page_pmd.
	 */
	if (!hpte_slot_array)
		return;

	/* Prefer the platform's bulk huge-page invalidate hook if present. */
	if (ppc_md.hugepage_invalidate)
		return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
						  psize, ssize);
	/*
	 * No bulk hpte removal support, invalidate each entry
	 * (one HPTE per base page within the huge page).
	 */
	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = HPAGE_PMD_SIZE >> shift;
	for (i = 0; i < max_hpte_count; i++) {
		/*
		 * 8 bits per each hpte entries
		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
		 */
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn of this base page, then its hash bucket */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		/* Entries in the secondary group use the inverted hash. */
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		/* base page size = psize, actual page size = 16M huge page */
		ppc_md.hpte_invalidate(slot, vpn, psize,
				       MMU_PAGE_16M, ssize, 0);
	}
}

|
|
|
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
|
pmd_t *pmdp, unsigned long trap, int local, int ssize,
|
|
|
unsigned int psize)
|
|
@@ -33,7 +84,9 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
|
* atomically mark the linux large page PMD busy and dirty
|
|
|
*/
|
|
|
do {
|
|
|
- old_pmd = pmd_val(*pmdp);
|
|
|
+ pmd_t pmd = ACCESS_ONCE(*pmdp);
|
|
|
+
|
|
|
+ old_pmd = pmd_val(pmd);
|
|
|
/* If PMD busy, retry the access */
|
|
|
if (unlikely(old_pmd & _PAGE_BUSY))
|
|
|
return 0;
|
|
@@ -85,6 +138,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
|
vpn = hpt_vpn(ea, vsid, ssize);
|
|
|
hash = hpt_hash(vpn, shift, ssize);
|
|
|
hpte_slot_array = get_hpte_slot_array(pmdp);
|
|
|
+ if (psize == MMU_PAGE_4K) {
|
|
|
+ /*
|
|
|
+ * invalidate the old hpte entry if we have that mapped via 64K
|
|
|
+ * base page size. This is because demote_segment won't flush
|
|
|
+ * hash page table entries.
|
|
|
+ */
|
|
|
+ if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
|
|
|
+ invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
|
|
|
+ }
|
|
|
|
|
|
valid = hpte_valid(hpte_slot_array, index);
|
|
|
if (valid) {
|
|
@@ -107,11 +169,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
|
* safely update this here.
|
|
|
*/
|
|
|
valid = 0;
|
|
|
- new_pmd &= ~_PAGE_HPTEFLAGS;
|
|
|
hpte_slot_array[index] = 0;
|
|
|
- } else
|
|
|
- /* clear the busy bits and set the hash pte bits */
|
|
|
- new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
if (!valid) {
|
|
@@ -119,11 +178,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
|
|
|
|
/* insert new entry */
|
|
|
pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
|
|
|
-repeat:
|
|
|
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
|
|
|
-
|
|
|
- /* clear the busy bits and set the hash pte bits */
|
|
|
- new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
|
|
|
+ new_pmd |= _PAGE_HASHPTE;
|
|
|
|
|
|
/* Add in WIMG bits */
|
|
|
rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
|
|
@@ -132,6 +187,8 @@ repeat:
|
|
|
* enable the memory coherence always
|
|
|
*/
|
|
|
rflags |= HPTE_R_M;
|
|
|
+repeat:
|
|
|
+ hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
|
|
|
|
|
|
/* Insert into the hash table, primary slot */
|
|
|
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
|
|
@@ -172,8 +229,17 @@ repeat:
|
|
|
mark_hpte_slot_valid(hpte_slot_array, index, slot);
|
|
|
}
|
|
|
/*
|
|
|
- * No need to use ldarx/stdcx here
|
|
|
+ * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
|
|
|
+ * base page size 4k.
|
|
|
+ */
|
|
|
+ if (psize == MMU_PAGE_4K)
|
|
|
+ new_pmd |= _PAGE_COMBO;
|
|
|
+ /*
|
|
|
+ * The hpte valid is stored in the pgtable whose address is in the
|
|
|
+ * second half of the PMD. Order this against clearing of the busy bit in
|
|
|
+ * huge pmd.
|
|
|
*/
|
|
|
+ smp_wmb();
|
|
|
*pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
|
|
|
return 0;
|
|
|
}
|