@@ -459,12 +459,16 @@ static void validate_mm(struct mm_struct *mm)
 	struct vm_area_struct *vma = mm->mmap;
 
 	while (vma) {
+		struct anon_vma *anon_vma = vma->anon_vma;
 		struct anon_vma_chain *avc;
 
-		vma_lock_anon_vma(vma);
-		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-			anon_vma_interval_tree_verify(avc);
-		vma_unlock_anon_vma(vma);
+		if (anon_vma) {
+			anon_vma_lock_read(anon_vma);
+			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+				anon_vma_interval_tree_verify(avc);
+			anon_vma_unlock_read(anon_vma);
+		}
+
 		highest_address = vma->vm_end;
 		vma = vma->vm_next;
 		i++;
@@ -2145,32 +2149,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	int error;
+	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
+	/* Guard against wrapping around to address 0. */
+	if (address < PAGE_ALIGN(address+4))
+		address = PAGE_ALIGN(address+4);
+	else
+		return -ENOMEM;
+
+	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
-	vma_lock_anon_vma(vma);
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
-	 * Also guard against wrapping around to address 0.
 	 */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else {
-		vma_unlock_anon_vma(vma);
-		return -ENOMEM;
-	}
-	error = 0;
+	anon_vma_lock_write(vma->anon_vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address > vma->vm_end) {
@@ -2188,7 +2187,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 				 * updates, but we only hold a shared mmap_sem
 				 * lock here, so we need to protect against
 				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
+				 * anon_vma_lock_write() doesn't help here, as
 				 * we don't guarantee that all growable vmas
 				 * in a mm share the same root anon vma.
 				 * So, we reuse mm->page_table_lock to guard
@@ -2211,7 +2210,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 			}
 		}
 	}
-	vma_unlock_anon_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(mm);
 	return error;
@@ -2227,25 +2226,21 @@ int expand_downwards(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	int error;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
-	if (unlikely(anon_vma_prepare(vma)))
-		return -ENOMEM;
-
 	address &= PAGE_MASK;
 	error = security_mmap_addr(address);
 	if (error)
 		return error;
 
-	vma_lock_anon_vma(vma);
+	/* We must make sure the anon_vma is allocated. */
+	if (unlikely(anon_vma_prepare(vma)))
+		return -ENOMEM;
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
+	anon_vma_lock_write(vma->anon_vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address < vma->vm_start) {
@@ -2263,7 +2258,7 @@ int expand_downwards(struct vm_area_struct *vma,
 				 * updates, but we only hold a shared mmap_sem
 				 * lock here, so we need to protect against
 				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
+				 * anon_vma_lock_write() doesn't help here, as
 				 * we don't guarantee that all growable vmas
 				 * in a mm share the same root anon vma.
 				 * So, we reuse mm->page_table_lock to guard
@@ -2284,7 +2279,7 @@ int expand_downwards(struct vm_area_struct *vma,
 			}
 		}
 	}
-	vma_unlock_anon_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(mm);
 	return error;
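
For reference, here is a minimal sketch (not part of the patch) of the write-side locking pattern both expansion paths follow after this change. The function name grow_example() is hypothetical, the caller is assumed to hold mmap_sem for read, and the real expand_upwards()/expand_downwards() additionally do the gap and anon_vma interval-tree bookkeeping omitted here:

#include <linux/mm.h>
#include <linux/rmap.h>

static int grow_example(struct vm_area_struct *vma, unsigned long new_end)
{
	/* Make sure vma->anon_vma exists before trying to lock it. */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	/* Lock the anon_vma directly instead of via the old vma_lock_anon_vma() helper. */
	anon_vma_lock_write(vma->anon_vma);

	if (new_end > vma->vm_end) {
		/* vm_end updates are additionally guarded by mm->page_table_lock. */
		spin_lock(&vma->vm_mm->page_table_lock);
		vma->vm_end = new_end;
		spin_unlock(&vma->vm_mm->page_table_lock);
	}

	anon_vma_unlock_write(vma->anon_vma);
	return 0;
}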