@@ -23,7 +23,7 @@ bool __rpte_sub_valid(real_pte_t rpte, unsigned long index)
         unsigned long g_idx;
         unsigned long ptev = pte_val(rpte.pte);
 
-        g_idx = (ptev & _PAGE_COMBO_VALID) >> _PAGE_F_GIX_SHIFT;
+        g_idx = (ptev & H_PAGE_COMBO_VALID) >> H_PAGE_F_GIX_SHIFT;
         index = index >> 2;
         if (g_idx & (0x1 << index))
                 return true;
@@ -37,12 +37,12 @@ static unsigned long mark_subptegroup_valid(unsigned long ptev, unsigned long in
 {
         unsigned long g_idx;
 
-        if (!(ptev & _PAGE_COMBO))
+        if (!(ptev & H_PAGE_COMBO))
                 return ptev;
         index = index >> 2;
         g_idx = 0x1 << index;
 
-        return ptev | (g_idx << _PAGE_F_GIX_SHIFT);
+        return ptev | (g_idx << H_PAGE_F_GIX_SHIFT);
 }
 
 int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
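
For context on the two helpers above: a 64K Linux PTE can be backed by sixteen 4K hash entries, and the bits that normally carry the HPTE group index are reused, one bit per group of four subpages, as a "this group already has a slot" mask. A minimal userspace sketch of that bookkeeping, with stand-in constants rather than the real H_PAGE_* definitions:

/*
 * Illustrative sketch of the group-valid bookkeeping, using stand-in
 * constants; the real H_PAGE_* values live in the powerpc headers and
 * sit elsewhere in the PTE.
 */
#include <stdio.h>

#define DEMO_F_GIX_SHIFT 12UL                        /* stand-in for H_PAGE_F_GIX_SHIFT */
#define DEMO_COMBO_VALID (0xfUL << DEMO_F_GIX_SHIFT) /* stand-in for H_PAGE_COMBO_VALID */

/* A 64K PTE covers sixteen 4K subpages, tracked in four groups of four. */
static unsigned long mark_group_valid(unsigned long ptev, unsigned long subpg_index)
{
        unsigned long group = subpg_index >> 2;       /* subpage 0-15 -> group 0-3 */

        return ptev | ((0x1UL << group) << DEMO_F_GIX_SHIFT);
}

static int group_is_valid(unsigned long ptev, unsigned long subpg_index)
{
        unsigned long g_idx = (ptev & DEMO_COMBO_VALID) >> DEMO_F_GIX_SHIFT;

        return (g_idx >> (subpg_index >> 2)) & 0x1;
}

int main(void)
{
        unsigned long ptev = 0;

        ptev = mark_group_valid(ptev, 5);             /* subpage 5 lives in group 1 */
        printf("group of subpage 5 valid?  %d\n", group_is_valid(ptev, 5));  /* 1 */
        printf("group of subpage 12 valid? %d\n", group_is_valid(ptev, 12)); /* 0 */
        return 0;
}
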
@@ -66,7 +66,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 
                 old_pte = pte_val(pte);
                 /* If PTE busy, retry the access */
-                if (unlikely(old_pte & _PAGE_BUSY))
+                if (unlikely(old_pte & H_PAGE_BUSY))
                         return 0;
                 /* If PTE permissions don't match, take page fault */
                 if (unlikely(!check_pte_access(access, old_pte)))
@@ -74,9 +74,9 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
                 /*
                  * Try to lock the PTE, add ACCESSED and DIRTY if it was
                  * a write access. Since this is 4K insert of 64K page size
-                 * also add _PAGE_COMBO
+                 * also add H_PAGE_COMBO
                  */
-                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO;
+                new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED | H_PAGE_COMBO;
                 if (access & _PAGE_WRITE)
                         new_pte |= _PAGE_DIRTY;
         } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
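
The loop the two hunks above touch is the usual lock-then-update pattern: grab the busy bit with a compare-and-swap and go around again if the PTE changed underneath. A rough userspace analogue using C11 atomics in place of the kernel's pte_xchg(), with made-up flag values:

#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_BUSY     (1UL << 0)    /* made-up flag values, not the kernel's */
#define DEMO_ACCESSED (1UL << 1)
#define DEMO_DIRTY    (1UL << 2)

/* Returns false when the entry is already busy, so the caller retries the access. */
static bool lock_and_mark(_Atomic unsigned long *pte, bool write)
{
        unsigned long old, new;

        do {
                old = atomic_load(pte);
                if (old & DEMO_BUSY)            /* someone else owns the entry */
                        return false;
                new = old | DEMO_BUSY | DEMO_ACCESSED;
                if (write)
                        new |= DEMO_DIRTY;
                /* if the PTE changed between the load and the cmpxchg, loop again */
        } while (!atomic_compare_exchange_weak(pte, &old, new));

        return true;
}
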
@@ -103,21 +103,21 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
         /*
          *None of the sub 4k page is hashed
          */
-        if (!(old_pte & _PAGE_HASHPTE))
+        if (!(old_pte & H_PAGE_HASHPTE))
                 goto htab_insert_hpte;
         /*
          * Check if the pte was already inserted into the hash table
          * as a 64k HW page, and invalidate the 64k HPTE if so.
          */
-        if (!(old_pte & _PAGE_COMBO)) {
+        if (!(old_pte & H_PAGE_COMBO)) {
                 flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
                 /*
                  * clear the old slot details from the old and new pte.
                  * On hash insert failure we use old pte value and we don't
                  * want slot information there if we have a insert failure.
                  */
-                old_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
-                new_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
+                old_pte &= ~(H_PAGE_HASHPTE | H_PAGE_F_GIX | H_PAGE_F_SECOND);
+                new_pte &= ~(H_PAGE_HASHPTE | H_PAGE_F_GIX | H_PAGE_F_SECOND);
                 goto htab_insert_hpte;
         }
         /*
@@ -143,15 +143,15 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
                 if (ret == -1)
                         goto htab_insert_hpte;
 
-                *ptep = __pte(new_pte & ~_PAGE_BUSY);
+                *ptep = __pte(new_pte & ~H_PAGE_BUSY);
                 return 0;
         }
 
 htab_insert_hpte:
         /*
-         * handle _PAGE_4K_PFN case
+         * handle H_PAGE_4K_PFN case
          */
-        if (old_pte & _PAGE_4K_PFN) {
+        if (old_pte & H_PAGE_4K_PFN) {
                 /*
                  * All the sub 4k page have the same
                  * physical address.
@@ -199,20 +199,20 @@ repeat:
         }
         /*
          * Insert slot number & secondary bit in PTE second half,
-         * clear _PAGE_BUSY and set appropriate HPTE slot bit
-         * Since we have _PAGE_BUSY set on ptep, we can be sure
+         * clear H_PAGE_BUSY and set appropriate HPTE slot bit
+         * Since we have H_PAGE_BUSY set on ptep, we can be sure
          * nobody is undating hidx.
          */
         hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
         rpte.hidx &= ~(0xfUL << (subpg_index << 2));
         *hidxp = rpte.hidx | (slot << (subpg_index << 2));
         new_pte = mark_subptegroup_valid(new_pte, subpg_index);
-        new_pte |= _PAGE_HASHPTE;
+        new_pte |= H_PAGE_HASHPTE;
         /*
          * check __real_pte for details on matching smp_rmb()
          */
         smp_wmb();
-        *ptep = __pte(new_pte & ~_PAGE_BUSY);
+        *ptep = __pte(new_pte & ~H_PAGE_BUSY);
         return 0;
 }
 
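
The second half of the real PTE used above stores one 4-bit hash-slot value per 4K subpage, which is what the 0xfUL mask and the (subpg_index << 2) shift implement. Stripped of the kernel types, the nibble bookkeeping looks roughly like this; hidx_set()/hidx_get() are illustrative helpers, not kernel functions:

/* Sketch only: operates on a plain unsigned long instead of the real PTE second half. */
static unsigned long hidx_set(unsigned long hidx, unsigned int subpg_index,
                              unsigned long slot)
{
        unsigned int shift = subpg_index << 2;          /* 4 bits per subpage */

        hidx &= ~(0xfUL << shift);                      /* clear the old nibble */
        return hidx | ((slot & 0xfUL) << shift);        /* record the new slot */
}

static unsigned long hidx_get(unsigned long hidx, unsigned int subpg_index)
{
        return (hidx >> (subpg_index << 2)) & 0xfUL;
}
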
@@ -234,7 +234,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 
                 old_pte = pte_val(pte);
                 /* If PTE busy, retry the access */
-                if (unlikely(old_pte & _PAGE_BUSY))
+                if (unlikely(old_pte & H_PAGE_BUSY))
                         return 0;
                 /* If PTE permissions don't match, take page fault */
                 if (unlikely(!check_pte_access(access, old_pte)))
@@ -250,7 +250,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
                  * Try to lock the PTE, add ACCESSED and DIRTY if it was
                  * a write access.
                  */
-                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
+                new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
                 if (access & _PAGE_WRITE)
                         new_pte |= _PAGE_DIRTY;
         } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
@@ -262,22 +262,22 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
                 rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
 
         vpn = hpt_vpn(ea, vsid, ssize);
-        if (unlikely(old_pte & _PAGE_HASHPTE)) {
+        if (unlikely(old_pte & H_PAGE_HASHPTE)) {
                 /*
                  * There MIGHT be an HPTE for this pte
                  */
                 hash = hpt_hash(vpn, shift, ssize);
-                if (old_pte & _PAGE_F_SECOND)
+                if (old_pte & H_PAGE_F_SECOND)
                         hash = ~hash;
                 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-                slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT;
+                slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
 
                 if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K,
                                          MMU_PAGE_64K, ssize, flags) == -1)
                         old_pte &= ~_PAGE_HPTEFLAGS;
         }
 
-        if (likely(!(old_pte & _PAGE_HASHPTE))) {
+        if (likely(!(old_pte & H_PAGE_HASHPTE))) {
 
                 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
                 hash = hpt_hash(vpn, shift, ssize);
@@ -317,9 +317,10 @@ repeat:
                                            MMU_PAGE_64K, MMU_PAGE_64K, old_pte);
                         return -1;
                 }
-                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
-                new_pte |= (slot << _PAGE_F_GIX_SHIFT) & (_PAGE_F_SECOND | _PAGE_F_GIX);
+                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
+                new_pte |= (slot << H_PAGE_F_GIX_SHIFT) &
+                        (H_PAGE_F_SECOND | H_PAGE_F_GIX);
         }
-        *ptep = __pte(new_pte & ~_PAGE_BUSY);
+        *ptep = __pte(new_pte & ~H_PAGE_BUSY);
         return 0;
 }
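
In the 64K path above, the slot value written into the PTE is a small quantity whose low bits hold the index within the HPTE group and whose next bit flags the secondary hash; that is what lets a single shift and mask against H_PAGE_F_SECOND | H_PAGE_F_GIX record it, and what the earlier hunk undoes when it rebuilds the global slot from the hash. A sketch of that round trip with stand-in constants; encode_slot()/decode_slot() are illustrative, not kernel helpers:

#include <stdio.h>

#define DEMO_F_GIX_SHIFT 12UL
#define DEMO_F_GIX       (0x7UL << DEMO_F_GIX_SHIFT)   /* index within the group */
#define DEMO_F_SECOND    (0x8UL << DEMO_F_GIX_SHIFT)   /* secondary-hash flag, just above */

static unsigned long encode_slot(unsigned long pte, unsigned long slot)
{
        /* bits 0-2 of slot: index in group; bit 3: secondary hash */
        return pte | ((slot << DEMO_F_GIX_SHIFT) & (DEMO_F_SECOND | DEMO_F_GIX));
}

static unsigned long decode_slot(unsigned long pte, unsigned long hash,
                                 unsigned long hash_mask, unsigned long per_group)
{
        unsigned long slot;

        if (pte & DEMO_F_SECOND)        /* secondary hash: invert before masking */
                hash = ~hash;
        slot = (hash & hash_mask) * per_group;
        return slot + ((pte & DEMO_F_GIX) >> DEMO_F_GIX_SHIFT);
}

int main(void)
{
        unsigned long pte = encode_slot(0, 0xb);        /* secondary group, index 3 */
        unsigned long hash = 0x1234;

        printf("global slot: %lu\n", decode_slot(pte, hash, 0xffff, 8));
        return 0;
}
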