@@ -140,9 +140,11 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		pgprot_t newprot, int dirty_accountable, int prot_numa)
 {
 	pmd_t *pmd;
+	struct mm_struct *mm = vma->vm_mm;
 	unsigned long next;
 	unsigned long pages = 0;
 	unsigned long nr_huge_updates = 0;
+	unsigned long mni_start = 0;
 
 	pmd = pmd_offset(pud, addr);
 	do {
@@ -151,6 +153,13 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		next = pmd_addr_end(addr, end);
 		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
 			continue;
+
+		/* invoke the mmu notifier if the pmd is populated */
+		if (!mni_start) {
+			mni_start = addr;
+			mmu_notifier_invalidate_range_start(mm, mni_start, end);
+		}
+
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
 				split_huge_page_pmd(vma, addr, pmd);
@@ -175,6 +184,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		pages += this_pages;
 	} while (pmd++, addr = next, addr != end);
 
+	if (mni_start)
+		mmu_notifier_invalidate_range_end(mm, mni_start, end);
+
 	if (nr_huge_updates)
 		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
 	return pages;
@@ -234,15 +246,12 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, pgprot_t newprot,
 		int dirty_accountable, int prot_numa)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	unsigned long pages;
 
-	mmu_notifier_invalidate_range_start(mm, start, end);
 	if (is_vm_hugetlb_page(vma))
 		pages = hugetlb_change_protection(vma, start, end, newprot);
 	else
 		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
-	mmu_notifier_invalidate_range_end(mm, start, end);
 
 	return pages;
 }
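
The diff moves the mmu_notifier_invalidate_range_start()/_end() pair from change_protection() down into change_pmd_range(), and makes the start call lazy: it fires only when the walk reaches the first populated pmd, so a range with nothing to update never triggers a notifier round trip, and the matching _end() call is issued only if a _start() was issued. Below is a minimal standalone userspace sketch of that deferred start/end pattern; range_notify_start()/range_notify_end(), entry_populated(), and the page-sized stride are illustrative stand-ins, not the kernel's mmu notifier API.

#include <stdio.h>

/* Hypothetical stand-ins for mmu_notifier_invalidate_range_{start,end}(). */
static void range_notify_start(unsigned long start, unsigned long end)
{
	printf("notify start: [%#lx, %#lx)\n", start, end);
}

static void range_notify_end(unsigned long start, unsigned long end)
{
	printf("notify end:   [%#lx, %#lx)\n", start, end);
}

/* Stand-in for the populated-pmd check; arbitrary demo pattern. */
static int entry_populated(unsigned long addr)
{
	return (addr / 0x1000) % 2;
}

static void walk_range(unsigned long addr, unsigned long end)
{
	/*
	 * 0 doubles as the "notifier not yet invoked" sentinel, mirroring
	 * mni_start in the patch (which relies on the walked addresses
	 * being nonzero).
	 */
	unsigned long notify_start = 0;

	for (; addr < end; addr += 0x1000) {
		if (!entry_populated(addr))
			continue;

		/*
		 * Fire the start notification lazily, on the first entry
		 * that actually needs modification; a range with no
		 * populated entries skips the notifier entirely.
		 */
		if (!notify_start) {
			notify_start = addr;
			range_notify_start(notify_start, end);
		}

		/* ... modify the entry here ... */
	}

	/* Only close the range if it was ever opened. */
	if (notify_start)
		range_notify_end(notify_start, end);
}

int main(void)
{
	walk_range(0x0, 0x8000);
	return 0;
}

One consequence of the lazy form, visible in the sketch: the notified range starts at the first populated entry rather than at the caller's start address, which narrows the invalidation window for sparsely populated ranges while still covering everything the walk can touch.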