@@ -101,7 +101,6 @@ struct shmem_falloc {
 enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
-	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
 };
@@ -169,7 +168,7 @@ static inline int shmem_reacct_size(unsigned long flags,

 /*
  * ... whereas tmpfs objects are accounted incrementally as
- * pages are allocated, in order to allow huge sparse files.
+ * pages are allocated, in order to allow large sparse files.
  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
  */
@@ -947,8 +946,7 @@ redirty:
	return 0;
 }

-#ifdef CONFIG_NUMA
-#ifdef CONFIG_TMPFS
+#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
 {
	char buffer[64];
@@ -972,7 +970,18 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
	}
	return mpol;
 }
-#endif /* CONFIG_TMPFS */
+#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
+static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
+{
+}
+static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+{
+	return NULL;
+}
+#endif /* CONFIG_NUMA && CONFIG_TMPFS */
+#ifndef CONFIG_NUMA
+#define vm_policy vm_private_data
+#endif

 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
@@ -1008,39 +1017,17 @@ static struct page *shmem_alloc_page(gfp_t gfp,
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

-	page = alloc_page_vma(gfp, &pvma, 0);
+	page = alloc_pages_vma(gfp, 0, &pvma, 0, numa_node_id(), false);
+	if (page) {
+		__SetPageLocked(page);
+		__SetPageSwapBacked(page);
+	}

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
 }
-#else /* !CONFIG_NUMA */
-#ifdef CONFIG_TMPFS
-static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
-{
-}
-#endif /* CONFIG_TMPFS */
-
-static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
-			struct shmem_inode_info *info, pgoff_t index)
-{
-	return swapin_readahead(swap, gfp, NULL, 0);
-}
-
-static inline struct page *shmem_alloc_page(gfp_t gfp,
-			struct shmem_inode_info *info, pgoff_t index)
-{
-	return alloc_page(gfp);
-}
-#endif /* CONFIG_NUMA */
-
-#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
-static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
-{
-	return NULL;
-}
-#endif

 /*
  * When a page is moved from swapcache to shmem filecache (either by the
@@ -1084,8 +1071,6 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

-	__SetPageLocked(newpage);
-	__SetPageSwapBacked(newpage);
	SetPageUptodate(newpage);
	set_page_private(newpage, swap_index);
	SetPageSwapCache(newpage);
@@ -1155,7 +1140,7 @@ repeat:
		page = NULL;
	}

-	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
+	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto unlock;
@@ -1275,9 +1260,6 @@ repeat:
			error = -ENOMEM;
			goto decused;
		}
-
-		__SetPageLocked(page);
-		__SetPageSwapBacked(page);
		if (sgp == SGP_WRITE)
			__SetPageReferenced(page);

@@ -1321,12 +1303,10 @@ clear:
			flush_dcache_page(page);
			SetPageUptodate(page);
		}
-		if (sgp == SGP_DIRTY)
-			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
-	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
+	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
		if (alloced) {
			ClearPageDirty(page);
@@ -1633,7 +1613,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (!iter_is_iovec(to))
-		sgp = SGP_DIRTY;
+		sgp = SGP_CACHE;

	index = *ppos >> PAGE_SHIFT;
	offset = *ppos & ~PAGE_MASK;
@@ -1659,8 +1639,11 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
			error = 0;
			break;
		}
-		if (page)
+		if (page) {
+			if (sgp == SGP_CACHE)
+				set_page_dirty(page);
			unlock_page(page);
+		}

		/*
		 * We must evaluate after, since reads (unlike writes)
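
The two "sgp <= SGP_CACHE" tests in the patch lean on the enum order left behind once SGP_DIRTY is removed: the i_size-bounded types (SGP_READ, SGP_CACHE) now precede the types that may exceed i_size (SGP_WRITE, SGP_FALLOC). A minimal standalone sketch of that equivalence, using only the post-patch enum (this snippet is illustrative, not part of the patch):

#include <assert.h>

/* sgp_type as it stands after this patch: bounded types come first */
enum sgp_type { SGP_READ, SGP_CACHE, SGP_WRITE, SGP_FALLOC };

int main(void)
{
	for (int sgp = SGP_READ; sgp <= SGP_FALLOC; sgp++)
		/* the new range test matches the old pair of explicit checks */
		assert((sgp <= SGP_CACHE) ==
		       (sgp != SGP_WRITE && sgp != SGP_FALLOC));
	return 0;
}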