@@ -1886,6 +1886,9 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
 
+	if (!pfn_modify_allowed(pfn, pgprot))
+		return -EACCES;
+
 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 	ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
@@ -1921,6 +1924,9 @@ static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 
 	track_pfn_insert(vma, &pgprot, pfn);
 
+	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
+		return -EACCES;
+
 	/*
 	 * If we don't have pte special, then we have to use the pfn_valid()
 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
@@ -1982,6 +1988,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
 	pte_t *pte;
 	spinlock_t *ptl;
+	int err = 0;
 
 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
@@ -1989,12 +1996,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	arch_enter_lazy_mmu_mode();
 	do {
 		BUG_ON(!pte_none(*pte));
+		if (!pfn_modify_allowed(pfn, prot)) {
+			err = -EACCES;
+			break;
+		}
 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
-	return 0;
+	return err;
 }
 
 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -2003,6 +2014,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	pmd = pmd_alloc(mm, pud, addr);
@@ -2011,9 +2023,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 	VM_BUG_ON(pmd_trans_huge(*pmd));
 	do {
 		next = pmd_addr_end(addr, end);
-		if (remap_pte_range(mm, pmd, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pte_range(mm, pmd, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
@@ -2024,6 +2037,7 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
 {
 	pud_t *pud;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	pud = pud_alloc(mm, p4d, addr);
@@ -2031,9 +2045,10 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
 		return -ENOMEM;
 	do {
 		next = pud_addr_end(addr, end);
-		if (remap_pmd_range(mm, pud, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pmd_range(mm, pud, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
@@ -2044,6 +2059,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
 {
 	p4d_t *p4d;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	p4d = p4d_alloc(mm, pgd, addr);
@@ -2051,9 +2067,10 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
 		return -ENOMEM;
 	do {
 		next = p4d_addr_end(addr, end);
-		if (remap_pud_range(mm, p4d, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pud_range(mm, p4d, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (p4d++, addr = next, addr != end);
 	return 0;
 }
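
Two things are worth noting about this hunk set. First, the patch only calls pfn_modify_allowed(); it does not define it. A minimal sketch of the kind of architecture-side policy the hook enables is below, modeled on the x86 L1TF checks; the helpers __pte_needs_invert() and l1tf_pfn_limit() are assumptions borrowed from that series, and an architecture with no such concern can simply return true unconditionally.

/*
 * Sketch only, not part of the diff above: one plausible
 * architecture implementation of pfn_modify_allowed(), modeled
 * on the x86 L1TF mitigation. __pte_needs_invert() and
 * l1tf_pfn_limit() are assumed helpers from that series.
 */
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	/* CPU not affected by L1TF: no pfn needs restricting */
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return true;
	/* PTE will not be made non-present/inverted: nothing to leak */
	if (!__pte_needs_invert(pgprot_val(prot)))
		return true;
	/* Real memory is always allowed */
	if (pfn_valid(pfn))
		return true;
	/* Refuse high MMIO pfns for unprivileged callers */
	if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
		return false;
	return true;
}

Second, the reason remap_pmd_range(), remap_pud_range(), and remap_p4d_range() now forward err instead of hard-coding -ENOMEM: without that change, the new -EACCES raised in remap_pte_range() would be collapsed to -ENOMEM on the way up, and a remap_pfn_range() caller would see a permission failure misreported as an out-of-memory condition.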