@@ -99,35 +99,9 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-/*
- * This is called when relaxing access to a hugepage. It's also called in the page
- * fault path when we don't hit any of the major fault cases, ie, a minor
- * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
- * handled those two for us, we additionally deal with missing execute
- * permission here on some processors
- */
-int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
-			  pmd_t *pmdp, pmd_t entry, int dirty)
-{
-	int changed;
-#ifdef CONFIG_DEBUG_VM
-	WARN_ON(!pmd_trans_huge(*pmdp));
-	assert_spin_locked(&vma->vm_mm->page_table_lock);
-#endif
-	changed = !pmd_same(*(pmdp), entry);
-	if (changed) {
-		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
-		/*
-		 * Since we are not supporting SW TLB systems, we don't
-		 * have any thing similar to flush_tlb_page_nohash()
-		 */
-	}
-	return changed;
-}
-
-unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
-				  pmd_t *pmdp, unsigned long clr,
-				  unsigned long set)
+unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
+					pmd_t *pmdp, unsigned long clr,
+					unsigned long set)
 {
 	__be64 old_be, tmp;
 	unsigned long old;
@@ -158,8 +132,8 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
 	return old;
 }
 
-pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
-			  pmd_t *pmdp)
+pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
+				pmd_t *pmdp)
 {
 	pmd_t pmd;
 
@@ -197,25 +171,12 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 	return pmd;
 }
 
-/*
- * We currently remove entries from the hashtable regardless of whether
- * the entry was young or dirty.
- *
- * We should be more intelligent about this but for the moment we override
- * these functions and force a tlb flush unconditionally
- */
-int pmdp_test_and_clear_young(struct vm_area_struct *vma,
-			      unsigned long address, pmd_t *pmdp)
-{
-	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
-}
-
 /*
  * We want to put the pgtable in pmd and use pgtable for tracking
  * the base page size hptes
  */
-void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-				pgtable_t pgtable)
+void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+				      pgtable_t pgtable)
 {
 	pgtable_t *pgtable_slot;
 	assert_spin_locked(&mm->page_table_lock);
@@ -233,7 +194,7 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 	smp_wmb();
 }
 
-pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 {
 	pgtable_t pgtable;
 	pgtable_t *pgtable_slot;
@@ -253,8 +214,8 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 	return pgtable;
 }
 
-void pmdp_huge_split_prepare(struct vm_area_struct *vma,
-			     unsigned long address, pmd_t *pmdp)
+void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
+				   unsigned long address, pmd_t *pmdp)
 {
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
@@ -274,39 +235,6 @@ void pmdp_huge_split_prepare(struct vm_area_struct *vma,
 	pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
 }
 
-
-/*
- * set a new huge pmd. We should not be called for updating
- * an existing pmd entry. That should go via pmd_hugepage_update.
- */
-void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-		pmd_t *pmdp, pmd_t pmd)
-{
-#ifdef CONFIG_DEBUG_VM
-	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
-	assert_spin_locked(&mm->page_table_lock);
-	WARN_ON(!pmd_trans_huge(pmd));
-#endif
-	trace_hugepage_set_pmd(addr, pmd_val(pmd));
-	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
-}
-
-/*
- * We use this to invalidate a pmdp entry before switching from a
- * hugepte to regular pmd entry.
- */
-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
-		     pmd_t *pmdp)
-{
-	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
-
-	/*
-	 * This ensures that generic code that rely on IRQ disabling
-	 * to prevent a parallel THP split work as expected.
-	 */
-	kick_all_cpus_sync();
-}
-
 /*
  * A linux hugepage PMD was changed and the corresponding hash table entries
  * neesd to be flushed.
@@ -346,47 +274,8 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
 }
 
-static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
-{
-	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
-}
-
-pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
-{
-	unsigned long pmdv;
-
-	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
-	return pmd_set_protbits(__pmd(pmdv), pgprot);
-}
-
-pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
-{
-	return pfn_pmd(page_to_pfn(page), pgprot);
-}
-
-pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
-{
-	unsigned long pmdv;
-
-	pmdv = pmd_val(pmd);
-	pmdv &= _HPAGE_CHG_MASK;
-	return pmd_set_protbits(__pmd(pmdv), newprot);
-}
-
-/*
- * This is called at the end of handling a user page fault, when the
- * fault has been handled by updating a HUGE PMD entry in the linux page tables.
- * We use it to preload an HPTE into the hash table corresponding to
- * the updated linux HUGE PMD entry.
- */
-void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
-			  pmd_t *pmd)
-{
-	return;
-}
-
-pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
-			      unsigned long addr, pmd_t *pmdp)
+pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
+				    unsigned long addr, pmd_t *pmdp)
 {
 	pmd_t old_pmd;
 	pgtable_t pgtable;
@@ -421,7 +310,7 @@ pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 	return old_pmd;
 }
 
-int has_transparent_hugepage(void)
+int hash__has_transparent_hugepage(void)
 {
 
 	if (!mmu_has_feature(MMU_FTR_16M_PAGE))