@@ -3246,7 +3246,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
 		spinlock_t *src_ptl, *dst_ptl;
-		src_pte = huge_pte_offset(src, addr);
+		src_pte = huge_pte_offset(src, addr, sz);
 		if (!src_pte)
 			continue;
 		dst_pte = huge_pte_alloc(dst, addr, sz);
@@ -3330,7 +3330,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	address = start;
 	for (; address < end; address += sz) {
-		ptep = huge_pte_offset(mm, address);
+		ptep = huge_pte_offset(mm, address, sz);
 		if (!ptep)
 			continue;
 
@@ -3548,7 +3548,8 @@ retry_avoidcopy:
 			unmap_ref_private(mm, vma, old_page, address);
 			BUG_ON(huge_pte_none(pte));
 			spin_lock(ptl);
-			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+			ptep = huge_pte_offset(mm, address & huge_page_mask(h),
+					       huge_page_size(h));
 			if (likely(ptep &&
 				   pte_same(huge_ptep_get(ptep), pte)))
 				goto retry_avoidcopy;
@@ -3587,7 +3588,8 @@ retry_avoidcopy:
 	 * before the page tables are altered
 	 */
 	spin_lock(ptl);
-	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+	ptep = huge_pte_offset(mm, address & huge_page_mask(h),
+			       huge_page_size(h));
 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
 		ClearPagePrivate(new_page);
 
@@ -3874,7 +3876,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	address &= huge_page_mask(h);
 
-	ptep = huge_pte_offset(mm, address);
+	ptep = huge_pte_offset(mm, address, huge_page_size(h));
 	if (ptep) {
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
@@ -4131,7 +4133,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 *
 		 * Note that page table lock is not held when pte is null.
 		 */
-		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
+		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
+				      huge_page_size(h));
 		if (pte)
 			ptl = huge_pte_lock(h, mm, pte);
 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
@@ -4270,7 +4273,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 	for (; address < end; address += huge_page_size(h)) {
 		spinlock_t *ptl;
-		ptep = huge_pte_offset(mm, address);
+		ptep = huge_pte_offset(mm, address, huge_page_size(h));
 		if (!ptep)
 			continue;
 		ptl = huge_pte_lock(h, mm, ptep);
@@ -4534,7 +4537,8 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 
 		saddr = page_table_shareable(svma, vma, addr, idx);
 		if (saddr) {
-			spte = huge_pte_offset(svma->vm_mm, saddr);
+			spte = huge_pte_offset(svma->vm_mm, saddr,
+					       vma_mmu_pagesize(svma));
 			if (spte) {
 				get_page(virt_to_page(spte));
 				break;
@@ -4630,7 +4634,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	return pte;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;