@@ -109,7 +109,7 @@ int pgd_huge(pgd_t pgd)
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	/* Only called for hugetlbfs pages, hence can ignore THP */
-	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
+	return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
 }

 static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
@@ -682,28 +682,35 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	} while (addr = next, addr != end);
 }

+/*
+ * We are holding mmap_sem, so a parallel huge page collapse cannot run.
+ * To prevent hugepage split, disable irq.
+ */
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
 	pte_t *ptep;
 	struct page *page;
 	unsigned shift;
-	unsigned long mask;
+	unsigned long mask, flags;
 	/*
 	 * Transparent hugepages are handled by generic code. We can skip them
 	 * here.
 	 */
+	local_irq_save(flags);
 	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

 	/* Verify it is a huge page else bail. */
-	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
+	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
+		local_irq_restore(flags);
 		return ERR_PTR(-EINVAL);
-
+	}
 	mask = (1UL << shift) - 1;
 	page = pte_page(*ptep);
 	if (page)
 		page += (address & mask) / PAGE_SIZE;

+	local_irq_restore(flags);
 	return page;
 }

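/*
 * Aside, not part of the patch: a minimal sketch of the walk pattern the
 * hunk above adopts. The function name here is hypothetical; the calls
 * (local_irq_save, find_linux_pte_or_hugepte, pte_page) are the ones the
 * hunk itself uses, and kernel context (linux/mm.h, asm/pgtable.h) is
 * assumed. Keeping interrupts off from the lookup until the last
 * dereference of the returned pte is what stops a parallel THP
 * split/collapse from freeing the page table underneath the walker.
 */
static struct page *walk_hugepage_sketch(struct mm_struct *mm,
					 unsigned long addr)
{
	unsigned long flags;
	unsigned shift;
	pte_t *ptep;
	struct page *page = NULL;

	local_irq_save(flags);	/* block parallel THP split/collapse */
	ptep = find_linux_pte_or_hugepte(mm->pgd, addr, &shift);
	if (ptep && shift)
		page = pte_page(*ptep);	/* deref before irqs come back on */
	local_irq_restore(flags);
	return page;
}
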
@@ -950,9 +957,12 @@ void flush_dcache_icache_hugepage(struct page *page)
  *
  * So long as we atomically load page table pointers we are safe against teardown,
  * we can follow the address down to the the page and take a ref on it.
+ * This function needs to be called with interrupts disabled. We use this variant
+ * when we have MSR[EE] = 0 but paca->soft_enabled = 1.
  */
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
+pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+				   unsigned *shift)
 {
 	pgd_t pgd, *pgdp;
 	pud_t pud, *pudp;
@@ -1031,7 +1041,7 @@ out:
 	*shift = pdshift;
 	return ret_pte;
 }
-EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
+EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);

 int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
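/*
 * Aside, not part of the patch: the double-underscore rename gives the raw
 * walker an explicit contract: interrupts must already be off (or the
 * caller must be otherwise safe, as huge_pte_offset is under mmap_sem).
 * Below is a sketch of a checked wrapper enforcing that contract; this
 * exact wrapper is an assumption, not shown in this excerpt. VM_WARN is
 * from linux/mmdebug.h, arch_irqs_disabled from the arch irqflags code.
 */
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       unsigned *shift)
{
	VM_WARN(!arch_irqs_disabled(),
		"%s called with irq enabled\n", __func__);
	return __find_linux_pte_or_hugepte(pgdir, ea, shift);
}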