@@ -390,8 +390,9 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_DEBUG_VM_RB
-static int browse_rb(struct rb_root *root)
+static int browse_rb(struct mm_struct *mm)
 {
+	struct rb_root *root = &mm->mm_rb;
 	int i = 0, j, bug = 0;
 	struct rb_node *nd, *pn = NULL;
 	unsigned long prev = 0, pend = 0;
@@ -414,12 +415,14 @@ static int browse_rb(struct rb_root *root)
 				  vma->vm_start, vma->vm_end);
 			bug = 1;
 		}
+		spin_lock(&mm->page_table_lock);
 		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
 			pr_emerg("free gap %lx, correct %lx\n",
 			       vma->rb_subtree_gap,
 			       vma_compute_subtree_gap(vma));
 			bug = 1;
 		}
+		spin_unlock(&mm->page_table_lock);
 		i++;
 		pn = nd;
 		prev = vma->vm_start;
@@ -456,12 +459,16 @@ static void validate_mm(struct mm_struct *mm)
 	struct vm_area_struct *vma = mm->mmap;
 
 	while (vma) {
+		struct anon_vma *anon_vma = vma->anon_vma;
 		struct anon_vma_chain *avc;
 
-		vma_lock_anon_vma(vma);
-		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-			anon_vma_interval_tree_verify(avc);
-		vma_unlock_anon_vma(vma);
+		if (anon_vma) {
+			anon_vma_lock_read(anon_vma);
+			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+				anon_vma_interval_tree_verify(avc);
+			anon_vma_unlock_read(anon_vma);
+		}
+
 		highest_address = vma->vm_end;
 		vma = vma->vm_next;
 		i++;
@@ -475,7 +482,7 @@ static void validate_mm(struct mm_struct *mm)
 			  mm->highest_vm_end, highest_address);
 		bug = 1;
 	}
-	i = browse_rb(&mm->mm_rb);
+	i = browse_rb(mm);
 	if (i != mm->map_count) {
 		if (i != -1)
 			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
@@ -2142,32 +2149,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	int error;
+	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
+	/* Guard against wrapping around to address 0. */
+	if (address < PAGE_ALIGN(address+4))
+		address = PAGE_ALIGN(address+4);
+	else
+		return -ENOMEM;
+
+	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
-	vma_lock_anon_vma(vma);
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
-	 * Also guard against wrapping around to address 0.
 	 */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else {
-		vma_unlock_anon_vma(vma);
-		return -ENOMEM;
-	}
-	error = 0;
+	anon_vma_lock_write(vma->anon_vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address > vma->vm_end) {
@@ -2185,7 +2187,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 				 * updates, but we only hold a shared mmap_sem
 				 * lock here, so we need to protect against
 				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
+				 * anon_vma_lock_write() doesn't help here, as
 				 * we don't guarantee that all growable vmas
 				 * in a mm share the same root anon vma.
 				 * So, we reuse mm->page_table_lock to guard
@@ -2208,7 +2210,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 			}
 		}
 	}
-	vma_unlock_anon_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(mm);
 	return error;
@@ -2224,25 +2226,21 @@ int expand_downwards(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	int error;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
-	if (unlikely(anon_vma_prepare(vma)))
-		return -ENOMEM;
-
 	address &= PAGE_MASK;
 	error = security_mmap_addr(address);
 	if (error)
 		return error;
 
-	vma_lock_anon_vma(vma);
+	/* We must make sure the anon_vma is allocated. */
+	if (unlikely(anon_vma_prepare(vma)))
+		return -ENOMEM;
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
+	anon_vma_lock_write(vma->anon_vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address < vma->vm_start) {
@@ -2260,7 +2258,7 @@ int expand_downwards(struct vm_area_struct *vma,
 				 * updates, but we only hold a shared mmap_sem
 				 * lock here, so we need to protect against
 				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
+				 * anon_vma_lock_write() doesn't help here, as
 				 * we don't guarantee that all growable vmas
 				 * in a mm share the same root anon vma.
 				 * So, we reuse mm->page_table_lock to guard
@@ -2281,7 +2279,7 @@ int expand_downwards(struct vm_area_struct *vma,
 			}
 		}
 	}
-	vma_unlock_anon_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(mm);
 	return error;
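
For readers following the locking change rather than applying the patch, here is a minimal user-space sketch of the pattern the diff converges on: a per-object rwlock taken for write around the expansion (the role anon_vma_lock_write() plays above) plus a separate mutex around the derived gap field that a debug walker also reads (the role mm->page_table_lock plays in browse_rb()). Every name below (fake_vma, fake_expand_upwards, and so on) is hypothetical and exists only to illustrate the pattern; it is not kernel code.

/* Illustrative sketch only -- mirrors the two-lock pattern above. */
#include <pthread.h>
#include <stdio.h>

struct fake_vma {
	unsigned long start, end;	/* stands in for vm_start/vm_end */
	unsigned long gap;		/* stands in for rb_subtree_gap */
	pthread_rwlock_t expand_lock;	/* stands in for the anon_vma lock */
	pthread_mutex_t gap_lock;	/* stands in for mm->page_table_lock */
};

/* Grow the region upwards, in the shape of expand_upwards(). */
static int fake_expand_upwards(struct fake_vma *vma, unsigned long address)
{
	int error = 0;

	/* write lock serializes concurrent expansions of this object */
	pthread_rwlock_wrlock(&vma->expand_lock);
	if (address > vma->end) {
		/* publish end and the derived gap under the mutex so a
		 * concurrent checker never sees them out of sync */
		pthread_mutex_lock(&vma->gap_lock);
		vma->end = address;
		vma->gap = vma->end - vma->start;
		pthread_mutex_unlock(&vma->gap_lock);
	}
	pthread_rwlock_unlock(&vma->expand_lock);
	return error;
}

/* Debug-style check, in the shape of what browse_rb() does under the lock. */
static int fake_validate(struct fake_vma *vma)
{
	int bug = 0;

	pthread_mutex_lock(&vma->gap_lock);
	if (vma->gap != vma->end - vma->start) {
		fprintf(stderr, "gap %lu, correct %lu\n",
			vma->gap, vma->end - vma->start);
		bug = 1;
	}
	pthread_mutex_unlock(&vma->gap_lock);
	return bug;
}

int main(void)
{
	struct fake_vma vma = { .start = 0x1000, .end = 0x2000, .gap = 0x1000 };

	pthread_rwlock_init(&vma.expand_lock, NULL);
	pthread_mutex_init(&vma.gap_lock, NULL);

	fake_expand_upwards(&vma, 0x4000);
	return fake_validate(&vma);	/* exits 0 when the gap is consistent */
}

Building with cc -pthread is enough to try it; the point is only that the rwlock orders whole expansions while the mutex keeps the derived field coherent for an independent reader.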