@@ -1426,6 +1426,79 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	return ret;
 }
 
+static int try_to_unmap_nonlinear(struct page *page,
+		struct address_space *mapping, struct vm_area_struct *vma)
+{
+	int ret = SWAP_AGAIN;
+	unsigned long cursor;
+	unsigned long max_nl_cursor = 0;
+	unsigned long max_nl_size = 0;
+	unsigned int mapcount;
+
+	list_for_each_entry(vma,
+			&mapping->i_mmap_nonlinear, shared.nonlinear) {
+
+		cursor = (unsigned long) vma->vm_private_data;
+		if (cursor > max_nl_cursor)
+			max_nl_cursor = cursor;
+		cursor = vma->vm_end - vma->vm_start;
+		if (cursor > max_nl_size)
+			max_nl_size = cursor;
+	}
+
+	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
+		return SWAP_FAIL;
+	}
+
+	/*
+	 * We don't try to search for this page in the nonlinear vmas,
+	 * and page_referenced wouldn't have found it anyway. Instead
+	 * just walk the nonlinear vmas trying to age and unmap some.
+	 * The mapcount of the page we came in with is irrelevant,
+	 * but even so use it as a guide to how hard we should try?
+	 */
+	mapcount = page_mapcount(page);
+	if (!mapcount)
+		return ret;
+
+	cond_resched();
+
+	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
+	if (max_nl_cursor == 0)
+		max_nl_cursor = CLUSTER_SIZE;
+
+	do {
+		list_for_each_entry(vma,
+				&mapping->i_mmap_nonlinear, shared.nonlinear) {
+
+			cursor = (unsigned long) vma->vm_private_data;
+			while (cursor < max_nl_cursor &&
+				cursor < vma->vm_end - vma->vm_start) {
+				if (try_to_unmap_cluster(cursor, &mapcount,
+						vma, page) == SWAP_MLOCK)
+					ret = SWAP_MLOCK;
+				cursor += CLUSTER_SIZE;
+				vma->vm_private_data = (void *) cursor;
+				if ((int)mapcount <= 0)
+					return ret;
+			}
+			vma->vm_private_data = (void *) max_nl_cursor;
+		}
+		cond_resched();
+		max_nl_cursor += CLUSTER_SIZE;
+	} while (max_nl_cursor <= max_nl_size);
+
+	/*
+	 * Don't loop forever (perhaps all the remaining pages are
+	 * in locked vmas). Reset cursor on all unreserved nonlinear
+	 * vmas, now forgetting on which ones it had fallen behind.
+	 */
+	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
+		vma->vm_private_data = NULL;
+
+	return ret;
+}
+
 bool is_vma_temporary_stack(struct vm_area_struct *vma)
 {
 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -1515,10 +1588,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	pgoff_t pgoff = page->index << compound_order(page);
 	struct vm_area_struct *vma;
 	int ret = SWAP_AGAIN;
-	unsigned long cursor;
-	unsigned long max_nl_cursor = 0;
-	unsigned long max_nl_size = 0;
-	unsigned int mapcount;
 
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1539,64 +1608,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	if (TTU_ACTION(flags) == TTU_MUNLOCK)
 		goto out;
 
-	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
-							shared.nonlinear) {
-		cursor = (unsigned long) vma->vm_private_data;
-		if (cursor > max_nl_cursor)
-			max_nl_cursor = cursor;
-		cursor = vma->vm_end - vma->vm_start;
-		if (cursor > max_nl_size)
-			max_nl_size = cursor;
-	}
-
-	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
-		ret = SWAP_FAIL;
-		goto out;
-	}
-
-	/*
-	 * We don't try to search for this page in the nonlinear vmas,
-	 * and page_referenced wouldn't have found it anyway. Instead
-	 * just walk the nonlinear vmas trying to age and unmap some.
-	 * The mapcount of the page we came in with is irrelevant,
-	 * but even so use it as a guide to how hard we should try?
-	 */
-	mapcount = page_mapcount(page);
-	if (!mapcount)
-		goto out;
-	cond_resched();
-
-	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
-	if (max_nl_cursor == 0)
-		max_nl_cursor = CLUSTER_SIZE;
-
-	do {
-		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
-							shared.nonlinear) {
-			cursor = (unsigned long) vma->vm_private_data;
-			while ( cursor < max_nl_cursor &&
-				cursor < vma->vm_end - vma->vm_start) {
-				if (try_to_unmap_cluster(cursor, &mapcount,
-						vma, page) == SWAP_MLOCK)
-					ret = SWAP_MLOCK;
-				cursor += CLUSTER_SIZE;
-				vma->vm_private_data = (void *) cursor;
-				if ((int)mapcount <= 0)
-					goto out;
-			}
-			vma->vm_private_data = (void *) max_nl_cursor;
-		}
-		cond_resched();
-		max_nl_cursor += CLUSTER_SIZE;
-	} while (max_nl_cursor <= max_nl_size);
-
-	/*
-	 * Don't loop forever (perhaps all the remaining pages are
-	 * in locked vmas). Reset cursor on all unreserved nonlinear
-	 * vmas, now forgetting on which ones it had fallen behind.
-	 */
-	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
-		vma->vm_private_data = NULL;
+	ret = try_to_unmap_nonlinear(page, mapping, vma);
 out:
 	mutex_unlock(&mapping->i_mmap_mutex);
 	return ret;