@@ -603,55 +603,6 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
 }
 
-/*
- * We mark the pmd splitting and invalidate all the hpte
- * entries for this hugepage.
- */
-void pmdp_splitting_flush(struct vm_area_struct *vma,
-			  unsigned long address, pmd_t *pmdp)
-{
-	unsigned long old, tmp;
-
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-
-#ifdef CONFIG_DEBUG_VM
-	WARN_ON(!pmd_trans_huge(*pmdp));
-	assert_spin_locked(&vma->vm_mm->page_table_lock);
-#endif
-
-#ifdef PTE_ATOMIC_UPDATES
-
-	__asm__ __volatile__(
-	"1:	ldarx	%0,0,%3\n\
-		andi.	%1,%0,%6\n\
-		bne-	1b \n\
-		oris	%1,%0,%4@h \n\
-		stdcx.	%1,0,%3 \n\
-		bne-	1b"
-	: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
-	: "r" (pmdp), "i" (_PAGE_SPLITTING), "m" (*pmdp), "i" (_PAGE_BUSY)
-	: "cc" );
-#else
-	old = pmd_val(*pmdp);
-	*pmdp = __pmd(old | _PAGE_SPLITTING);
-#endif
-	/*
-	 * If we didn't had the splitting flag set, go and flush the
-	 * HPTE entries.
-	 */
-	trace_hugepage_splitting(address, old);
-	if (!(old & _PAGE_SPLITTING)) {
-		/* We need to flush the hpte */
-		if (old & _PAGE_HASHPTE)
-			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
-	}
-	/*
-	 * This ensures that generic code that rely on IRQ disabling
-	 * to prevent a parallel THP split work as expected.
-	 */
-	kick_all_cpus_sync();
-}
-
 /*
  * We want to put the pgtable in pmd and use pgtable for tracking
  * the base page size hptes