Browse Source

mm: replace init_page_accessed by __SetPageReferenced

Do we really need an exported alias for __SetPageReferenced()? Its
callers had better know what they're doing, in which case the page would
not already be marked referenced.  Kill init_page_accessed(), and just
use __SetPageReferenced() inline.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Prabhakar Lad <prabhakar.csengg@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Hugh Dickins 11 years ago
parent
commit
eb39d618f9
4 changed files with 6 additions and 15 deletions
  1. 0 1
      include/linux/swap.h
  2. 2 2
      mm/filemap.c
  3. 1 1
      mm/shmem.c
  4. 3 11
      mm/swap.c

+ 0 - 1
include/linux/swap.h

@@ -311,7 +311,6 @@ extern void lru_add_page_tail(struct page *page, struct page *page_tail,
 			 struct lruvec *lruvec, struct list_head *head);
 			 struct lruvec *lruvec, struct list_head *head);
 extern void activate_page(struct page *);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void mark_page_accessed(struct page *);
-extern void init_page_accessed(struct page *page);
 extern void lru_add_drain(void);
 extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_all(void);
 extern void lru_add_drain_all(void);

+ 2 - 2
mm/filemap.c

@@ -1091,9 +1091,9 @@ no_page:
 		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
 		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
 			fgp_flags |= FGP_LOCK;
 			fgp_flags |= FGP_LOCK;
 
 
-		/* Init accessed so avoit atomic mark_page_accessed later */
+		/* Init accessed so avoid atomic mark_page_accessed later */
 		if (fgp_flags & FGP_ACCESSED)
 		if (fgp_flags & FGP_ACCESSED)
-			init_page_accessed(page);
+			__SetPageReferenced(page);
 
 
 		err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
 		err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
 		if (unlikely(err)) {
 		if (unlikely(err)) {

+ 1 - 1
mm/shmem.c

@@ -1166,7 +1166,7 @@ repeat:
 		__SetPageSwapBacked(page);
 		__SetPageSwapBacked(page);
 		__set_page_locked(page);
 		__set_page_locked(page);
 		if (sgp == SGP_WRITE)
 		if (sgp == SGP_WRITE)
-			init_page_accessed(page);
+			__SetPageReferenced(page);
 
 
 		error = mem_cgroup_charge_file(page, current->mm,
 		error = mem_cgroup_charge_file(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
 						gfp & GFP_RECLAIM_MASK);

+ 3 - 11
mm/swap.c

@@ -589,6 +589,9 @@ static void __lru_cache_activate_page(struct page *page)
  * inactive,unreferenced	->	inactive,referenced
  * inactive,unreferenced	->	inactive,referenced
  * inactive,referenced		->	active,unreferenced
  * inactive,referenced		->	active,unreferenced
  * active,unreferenced		->	active,referenced
  * active,unreferenced		->	active,referenced
+ *
+ * When a newly allocated page is not yet visible, so safe for non-atomic ops,
+ * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
  */
  */
 void mark_page_accessed(struct page *page)
 void mark_page_accessed(struct page *page)
 {
 {
@@ -614,17 +617,6 @@ void mark_page_accessed(struct page *page)
 }
 }
 EXPORT_SYMBOL(mark_page_accessed);
 EXPORT_SYMBOL(mark_page_accessed);
 
 
-/*
- * Used to mark_page_accessed(page) that is not visible yet and when it is
- * still safe to use non-atomic ops
- */
-void init_page_accessed(struct page *page)
-{
-	if (!PageReferenced(page))
-		__SetPageReferenced(page);
-}
-EXPORT_SYMBOL(init_page_accessed);
-
 static void __lru_cache_add(struct page *page)
 static void __lru_cache_add(struct page *page)
 {
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);