|
@@ -783,6 +783,7 @@ static int unmerge_and_remove_all_rmap_items(void)
|
|
|
}
|
|
|
|
|
|
remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
|
|
|
+ up_read(&mm->mmap_sem);
|
|
|
|
|
|
spin_lock(&ksm_mmlist_lock);
|
|
|
ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
|
|
@@ -794,12 +795,9 @@ static int unmerge_and_remove_all_rmap_items(void)
|
|
|
|
|
|
free_mm_slot(mm_slot);
|
|
|
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
|
|
|
- up_read(&mm->mmap_sem);
|
|
|
mmdrop(mm);
|
|
|
- } else {
|
|
|
+ } else
|
|
|
spin_unlock(&ksm_mmlist_lock);
|
|
|
- up_read(&mm->mmap_sem);
|
|
|
- }
|
|
|
}
|
|
|
|
|
|
/* Clean up stable nodes, but don't worry if some are still busy */
|
|
@@ -1663,8 +1661,15 @@ next_mm:
|
|
|
up_read(&mm->mmap_sem);
|
|
|
mmdrop(mm);
|
|
|
} else {
|
|
|
- spin_unlock(&ksm_mmlist_lock);
|
|
|
up_read(&mm->mmap_sem);
|
|
|
+ /*
|
|
|
+ * up_read(&mm->mmap_sem) first because after
|
|
|
+	 * spin_unlock(&ksm_mmlist_lock) runs, the "mm" may
|
|
|
+ * already have been freed under us by __ksm_exit()
|
|
|
+ * because the "mm_slot" is still hashed and
|
|
|
+ * ksm_scan.mm_slot doesn't point to it anymore.
|
|
|
+ */
|
|
|
+ spin_unlock(&ksm_mmlist_lock);
|
|
|
}
|
|
|
|
|
|
/* Repeat until we've completed scanning the whole list */
|