|
@@ -361,41 +361,18 @@ static int shmem_free_swap(struct address_space *mapping,
|
|
|
|
|
|
/*
|
|
|
* Determine (in bytes) how many of the shmem object's pages mapped by the
|
|
|
- * given vma is swapped out.
|
|
|
+ * given offsets are swapped out.
|
|
|
*
|
|
|
* This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
|
|
|
* as long as the inode doesn't go away and racy results are not a problem.
|
|
|
*/
|
|
|
-unsigned long shmem_swap_usage(struct vm_area_struct *vma)
|
|
|
+unsigned long shmem_partial_swap_usage(struct address_space *mapping,
|
|
|
+ pgoff_t start, pgoff_t end)
|
|
|
{
|
|
|
- struct inode *inode = file_inode(vma->vm_file);
|
|
|
- struct shmem_inode_info *info = SHMEM_I(inode);
|
|
|
- struct address_space *mapping = inode->i_mapping;
|
|
|
- unsigned long swapped;
|
|
|
- pgoff_t start, end;
|
|
|
struct radix_tree_iter iter;
|
|
|
void **slot;
|
|
|
struct page *page;
|
|
|
-
|
|
|
- /* Be careful as we don't hold info->lock */
|
|
|
- swapped = READ_ONCE(info->swapped);
|
|
|
-
|
|
|
- /*
|
|
|
- * The easier cases are when the shmem object has nothing in swap, or
|
|
|
- * the vma maps it whole. Then we can simply use the stats that we
|
|
|
- * already track.
|
|
|
- */
|
|
|
- if (!swapped)
|
|
|
- return 0;
|
|
|
-
|
|
|
- if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
|
|
|
- return swapped << PAGE_SHIFT;
|
|
|
-
|
|
|
- swapped = 0;
|
|
|
-
|
|
|
- /* Here comes the more involved part */
|
|
|
- start = linear_page_index(vma, vma->vm_start);
|
|
|
- end = linear_page_index(vma, vma->vm_end);
|
|
|
+ unsigned long swapped = 0;
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
@@ -429,6 +406,40 @@ restart:
|
|
|
return swapped << PAGE_SHIFT;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Determine (in bytes) how many of the shmem object's pages mapped by the
|
|
|
+ * given vma are swapped out.
|
|
|
+ *
|
|
|
+ * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
|
|
|
+ * as long as the inode doesn't go away and racy results are not a problem.
|
|
|
+ */
|
|
|
+unsigned long shmem_swap_usage(struct vm_area_struct *vma)
|
|
|
+{
|
|
|
+ struct inode *inode = file_inode(vma->vm_file);
|
|
|
+ struct shmem_inode_info *info = SHMEM_I(inode);
|
|
|
+ struct address_space *mapping = inode->i_mapping;
|
|
|
+ unsigned long swapped;
|
|
|
+
|
|
|
+ /* Be careful as we don't hold info->lock */
|
|
|
+ swapped = READ_ONCE(info->swapped);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * The easier cases are when the shmem object has nothing in swap, or
|
|
|
+ * the vma maps it whole. Then we can simply use the stats that we
|
|
|
+ * already track.
|
|
|
+ */
|
|
|
+ if (!swapped)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
|
|
|
+ return swapped << PAGE_SHIFT;
|
|
|
+
|
|
|
+ /* Here comes the more involved part */
|
|
|
+ return shmem_partial_swap_usage(mapping,
|
|
|
+ linear_page_index(vma, vma->vm_start),
|
|
|
+ linear_page_index(vma, vma->vm_end));
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
|
|
|
*/
|