  1. /*
  2. * mm/rmap.c - physical to virtual reverse mappings
  3. *
  4. * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
  5. * Released under the General Public License (GPL).
  6. *
  7. * Simple, low overhead reverse mapping scheme.
  8. * Please try to keep this thing as modular as possible.
  9. *
  10. * Provides methods for unmapping each kind of mapped page:
  11. * the anon methods track anonymous pages, and
  12. * the file methods track pages belonging to an inode.
  13. *
  14. * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  15. * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  16. * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
  17. * Contributions by Hugh Dickins 2003, 2004
  18. */
  19. /*
  20. * Lock ordering in mm:
  21. *
  22. * inode->i_mutex (while writing or truncating, not reading or faulting)
  23. * inode->i_alloc_sem (vmtruncate_range)
  24. * mm->mmap_sem
  25. * page->flags PG_locked (lock_page)
  26. * mapping->i_mmap_mutex
  27. * anon_vma->lock
  28. * mm->page_table_lock or pte_lock
  29. * zone->lru_lock (in mark_page_accessed, isolate_lru_page)
  30. * swap_lock (in swap_duplicate, swap_info_get)
  31. * mmlist_lock (in mmput, drain_mmlist and others)
  32. * mapping->private_lock (in __set_page_dirty_buffers)
  33. * inode->i_lock (in set_page_dirty's __mark_inode_dirty)
  34. * inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty)
  35. * sb_lock (within inode_lock in fs/fs-writeback.c)
  36. * mapping->tree_lock (widely used, in set_page_dirty,
  37. * in arch-dependent flush_dcache_mmap_lock,
  38. * within inode_wb_list_lock in __sync_single_inode)
  39. *
  40. * (code doesn't rely on that order so it could be switched around)
  41. * ->tasklist_lock
  42. * anon_vma->lock (memory_failure, collect_procs_anon)
  43. * pte map lock
  44. */
  45. #include <linux/mm.h>
  46. #include <linux/pagemap.h>
  47. #include <linux/swap.h>
  48. #include <linux/swapops.h>
  49. #include <linux/slab.h>
  50. #include <linux/init.h>
  51. #include <linux/ksm.h>
  52. #include <linux/rmap.h>
  53. #include <linux/rcupdate.h>
  54. #include <linux/module.h>
  55. #include <linux/memcontrol.h>
  56. #include <linux/mmu_notifier.h>
  57. #include <linux/migrate.h>
  58. #include <linux/hugetlb.h>
  59. #include <asm/tlbflush.h>
  60. #include "internal.h"
  61. static struct kmem_cache *anon_vma_cachep;
  62. static struct kmem_cache *anon_vma_chain_cachep;
  63. static inline struct anon_vma *anon_vma_alloc(void)
  64. {
  65. struct anon_vma *anon_vma;
  66. anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
  67. if (anon_vma) {
  68. atomic_set(&anon_vma->refcount, 1);
  69. /*
  70. * Initialise the anon_vma root to point to itself. If called
  71. * from fork, the root will be reset to the parent's anon_vma.
  72. */
  73. anon_vma->root = anon_vma;
  74. }
  75. return anon_vma;
  76. }
  77. static inline void anon_vma_free(struct anon_vma *anon_vma)
  78. {
  79. VM_BUG_ON(atomic_read(&anon_vma->refcount));
  80. kmem_cache_free(anon_vma_cachep, anon_vma);
  81. }
  82. static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
  83. {
  84. return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
  85. }
  86. static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
  87. {
  88. kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
  89. }
  90. /**
  91. * anon_vma_prepare - attach an anon_vma to a memory region
  92. * @vma: the memory region in question
  93. *
  94. * This makes sure the memory mapping described by 'vma' has
  95. * an 'anon_vma' attached to it, so that we can associate the
  96. * anonymous pages mapped into it with that anon_vma.
  97. *
  98. * The common case will be that we already have one, but if
  99. * not we either need to find an adjacent mapping that we
  100. * can re-use the anon_vma from (very common when the only
  101. * reason for splitting a vma has been mprotect()), or we
  102. * allocate a new one.
  103. *
  104. * Anon-vma allocations are very subtle, because we may have
  105. * optimistically looked up an anon_vma in page_lock_anon_vma()
  106. * and that may actually touch the spinlock even in the newly
  107. * allocated vma (it depends on RCU to make sure that the
  108. * anon_vma isn't actually destroyed).
  109. *
  110. * As a result, we need to do proper anon_vma locking even
  111. * for the new allocation. At the same time, we do not want
  112. * to do any locking for the common case of already having
  113. * an anon_vma.
  114. *
  115. * This must be called with the mmap_sem held for reading.
  116. */
  117. int anon_vma_prepare(struct vm_area_struct *vma)
  118. {
  119. struct anon_vma *anon_vma = vma->anon_vma;
  120. struct anon_vma_chain *avc;
  121. might_sleep();
  122. if (unlikely(!anon_vma)) {
  123. struct mm_struct *mm = vma->vm_mm;
  124. struct anon_vma *allocated;
  125. avc = anon_vma_chain_alloc();
  126. if (!avc)
  127. goto out_enomem;
  128. anon_vma = find_mergeable_anon_vma(vma);
  129. allocated = NULL;
  130. if (!anon_vma) {
  131. anon_vma = anon_vma_alloc();
  132. if (unlikely(!anon_vma))
  133. goto out_enomem_free_avc;
  134. allocated = anon_vma;
  135. }
  136. anon_vma_lock(anon_vma);
  137. /* page_table_lock to protect against threads */
  138. spin_lock(&mm->page_table_lock);
  139. if (likely(!vma->anon_vma)) {
  140. vma->anon_vma = anon_vma;
  141. avc->anon_vma = anon_vma;
  142. avc->vma = vma;
  143. list_add(&avc->same_vma, &vma->anon_vma_chain);
  144. list_add_tail(&avc->same_anon_vma, &anon_vma->head);
  145. allocated = NULL;
  146. avc = NULL;
  147. }
  148. spin_unlock(&mm->page_table_lock);
  149. anon_vma_unlock(anon_vma);
  150. if (unlikely(allocated))
  151. put_anon_vma(allocated);
  152. if (unlikely(avc))
  153. anon_vma_chain_free(avc);
  154. }
  155. return 0;
  156. out_enomem_free_avc:
  157. anon_vma_chain_free(avc);
  158. out_enomem:
  159. return -ENOMEM;
  160. }
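/*
 * A minimal usage sketch for anon_vma_prepare(), assuming a fault-path
 * caller along the lines of do_anonymous_page() in mm/memory.c
 * (illustrative only, not part of this file):
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	... allocate the new anonymous page and map it into vma ...
 *
 * The caller must already hold mmap_sem for reading, as noted above.
 */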
  161. static void anon_vma_chain_link(struct vm_area_struct *vma,
  162. struct anon_vma_chain *avc,
  163. struct anon_vma *anon_vma)
  164. {
  165. avc->vma = vma;
  166. avc->anon_vma = anon_vma;
  167. list_add(&avc->same_vma, &vma->anon_vma_chain);
  168. anon_vma_lock(anon_vma);
  169. /*
  170. * It's critical to add new vmas to the tail of the anon_vma,
  171. * see comment in huge_memory.c:__split_huge_page().
  172. */
  173. list_add_tail(&avc->same_anon_vma, &anon_vma->head);
  174. anon_vma_unlock(anon_vma);
  175. }
  176. /*
  177. * Attach the anon_vmas from src to dst.
  178. * Returns 0 on success, -ENOMEM on failure.
  179. */
  180. int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
  181. {
  182. struct anon_vma_chain *avc, *pavc;
  183. list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
  184. avc = anon_vma_chain_alloc();
  185. if (!avc)
  186. goto enomem_failure;
  187. anon_vma_chain_link(dst, avc, pavc->anon_vma);
  188. }
  189. return 0;
  190. enomem_failure:
  191. unlink_anon_vmas(dst);
  192. return -ENOMEM;
  193. }
  194. /*
  195. * Attach vma to its own anon_vma, as well as to the anon_vmas that
  196. * the corresponding VMA in the parent process is attached to.
  197. * Returns 0 on success, non-zero on failure.
  198. */
  199. int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
  200. {
  201. struct anon_vma_chain *avc;
  202. struct anon_vma *anon_vma;
  203. /* Don't bother if the parent process has no anon_vma here. */
  204. if (!pvma->anon_vma)
  205. return 0;
  206. /*
  207. * First, attach the new VMA to the parent VMA's anon_vmas,
  208. * so rmap can find non-COWed pages in child processes.
  209. */
  210. if (anon_vma_clone(vma, pvma))
  211. return -ENOMEM;
  212. /* Then add our own anon_vma. */
  213. anon_vma = anon_vma_alloc();
  214. if (!anon_vma)
  215. goto out_error;
  216. avc = anon_vma_chain_alloc();
  217. if (!avc)
  218. goto out_error_free_anon_vma;
  219. /*
  220. * The root anon_vma's spinlock is the lock actually used when we
  221. * lock any of the anon_vmas in this anon_vma tree.
  222. */
  223. anon_vma->root = pvma->anon_vma->root;
  224. /*
  225. * With refcounts, an anon_vma can stay around longer than the
  226. * process it belongs to. The root anon_vma needs to be pinned until
  227. * this anon_vma is freed, because the lock lives in the root.
  228. */
  229. get_anon_vma(anon_vma->root);
  230. /* Mark this anon_vma as the one where our new (COWed) pages go. */
  231. vma->anon_vma = anon_vma;
  232. anon_vma_chain_link(vma, avc, anon_vma);
  233. return 0;
  234. out_error_free_anon_vma:
  235. put_anon_vma(anon_vma);
  236. out_error:
  237. unlink_anon_vmas(vma);
  238. return -ENOMEM;
  239. }
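/*
 * A rough sketch of the intended fork-time usage, assuming a dup_mmap()-style
 * caller in kernel/fork.c (the error-path label is illustrative):
 *
 *	tmp = ...copy of the parent's vma...;
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem;
 *
 * On success the child vma is linked to its own fresh anon_vma and, via
 * anon_vma_clone(), to every anon_vma the parent vma was attached to.
 */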
  240. static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
  241. {
  242. struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
  243. int empty;
  244. /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
  245. if (!anon_vma)
  246. return;
  247. anon_vma_lock(anon_vma);
  248. list_del(&anon_vma_chain->same_anon_vma);
  249. /* We must garbage collect the anon_vma if it's empty */
  250. empty = list_empty(&anon_vma->head);
  251. anon_vma_unlock(anon_vma);
  252. if (empty)
  253. put_anon_vma(anon_vma);
  254. }
  255. void unlink_anon_vmas(struct vm_area_struct *vma)
  256. {
  257. struct anon_vma_chain *avc, *next;
  258. /*
  259. * Unlink each anon_vma chained to the VMA. This list is ordered
  260. * from newest to oldest, ensuring the root anon_vma gets freed last.
  261. */
  262. list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
  263. anon_vma_unlink(avc);
  264. list_del(&avc->same_vma);
  265. anon_vma_chain_free(avc);
  266. }
  267. }
  268. static void anon_vma_ctor(void *data)
  269. {
  270. struct anon_vma *anon_vma = data;
  271. spin_lock_init(&anon_vma->lock);
  272. atomic_set(&anon_vma->refcount, 0);
  273. INIT_LIST_HEAD(&anon_vma->head);
  274. }
  275. void __init anon_vma_init(void)
  276. {
  277. anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
  278. 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
  279. anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
  280. }
  281. /*
  282. * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
  283. *
  284. * Since there is no serialization whatsoever against page_remove_rmap()
  285. * the best this function can do is return a locked anon_vma that might
  286. * have been relevant to this page.
  287. *
  288. * The page might have been remapped to a different anon_vma or the anon_vma
  289. * returned may already be freed (and even reused).
  290. *
  291. * All users of this function must be very careful when walking the anon_vma
  292. * chain and verify that the page in question is indeed mapped in it
  293. * [ something equivalent to page_mapped_in_vma() ].
  294. *
  295. * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
  296. * that the anon_vma pointer from page->mapping is valid if there is a
  297. * mapcount, we can dereference the anon_vma after observing those.
  298. */
  299. struct anon_vma *page_lock_anon_vma(struct page *page)
  300. {
  301. struct anon_vma *anon_vma, *root_anon_vma;
  302. unsigned long anon_mapping;
  303. rcu_read_lock();
  304. anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
  305. if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
  306. goto out;
  307. if (!page_mapped(page))
  308. goto out;
  309. anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
  310. root_anon_vma = ACCESS_ONCE(anon_vma->root);
  311. spin_lock(&root_anon_vma->lock);
  312. /*
  313. * If this page is still mapped, then its anon_vma cannot have been
  314. * freed. But if it has been unmapped, we have no security against
  315. * the anon_vma structure being freed and reused (for another anon_vma:
  316. * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
  317. * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
  318. * anon_vma->root before page_unlock_anon_vma() is called to unlock.
  319. */
  320. if (page_mapped(page))
  321. return anon_vma;
  322. spin_unlock(&root_anon_vma->lock);
  323. out:
  324. rcu_read_unlock();
  325. return NULL;
  326. }
  327. void page_unlock_anon_vma(struct anon_vma *anon_vma)
  328. {
  329. anon_vma_unlock(anon_vma);
  330. rcu_read_unlock();
  331. }
  332. /*
  333. * At what user virtual address is page expected in @vma?
  334. * Returns virtual address or -EFAULT if page's index/offset is not
  335. * within the range mapped by @vma.
  336. */
  337. inline unsigned long
  338. vma_address(struct page *page, struct vm_area_struct *vma)
  339. {
  340. pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  341. unsigned long address;
  342. if (unlikely(is_vm_hugetlb_page(vma)))
  343. pgoff = page->index << huge_page_order(page_hstate(page));
  344. address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
  345. if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
  346. /* page should be within @vma mapping range */
  347. return -EFAULT;
  348. }
  349. return address;
  350. }
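/*
 * Worked example of the arithmetic above, assuming 4K pages (so
 * PAGE_CACHE_SHIFT == PAGE_SHIFT and pgoff equals page->index): a page with
 * page->index == 5 in a vma with vm_pgoff == 3 and vm_start == 0x00400000
 * gives
 *
 *	address = 0x00400000 + ((5 - 3) << 12) = 0x00402000
 *
 * If that address fell outside [vm_start, vm_end) the page would not be
 * mapped by this vma and -EFAULT would be returned instead.
 */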
  351. /*
  352. * At what user virtual address is page expected in vma?
  353. * Caller should check the page is actually part of the vma.
  354. */
  355. unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
  356. {
  357. if (PageAnon(page)) {
  358. struct anon_vma *page__anon_vma = page_anon_vma(page);
  359. /*
  360. * Note: swapoff's unuse_vma() is more efficient with this
  361. * check, and needs it to match anon_vma when KSM is active.
  362. */
  363. if (!vma->anon_vma || !page__anon_vma ||
  364. vma->anon_vma->root != page__anon_vma->root)
  365. return -EFAULT;
  366. } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
  367. if (!vma->vm_file ||
  368. vma->vm_file->f_mapping != page->mapping)
  369. return -EFAULT;
  370. } else
  371. return -EFAULT;
  372. return vma_address(page, vma);
  373. }
  374. /*
  375. * Check that @page is mapped at @address into @mm.
  376. *
  377. * If @sync is false, page_check_address may perform a racy check to avoid
  378. * the page table lock when the pte is not present (helpful when reclaiming
  379. * highly shared pages).
  380. *
  381. * On success returns with pte mapped and locked.
  382. */
  383. pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
  384. unsigned long address, spinlock_t **ptlp, int sync)
  385. {
  386. pgd_t *pgd;
  387. pud_t *pud;
  388. pmd_t *pmd;
  389. pte_t *pte;
  390. spinlock_t *ptl;
  391. if (unlikely(PageHuge(page))) {
  392. pte = huge_pte_offset(mm, address);
  393. ptl = &mm->page_table_lock;
  394. goto check;
  395. }
  396. pgd = pgd_offset(mm, address);
  397. if (!pgd_present(*pgd))
  398. return NULL;
  399. pud = pud_offset(pgd, address);
  400. if (!pud_present(*pud))
  401. return NULL;
  402. pmd = pmd_offset(pud, address);
  403. if (!pmd_present(*pmd))
  404. return NULL;
  405. if (pmd_trans_huge(*pmd))
  406. return NULL;
  407. pte = pte_offset_map(pmd, address);
  408. /* Make a quick check before getting the lock */
  409. if (!sync && !pte_present(*pte)) {
  410. pte_unmap(pte);
  411. return NULL;
  412. }
  413. ptl = pte_lockptr(mm, pmd);
  414. check:
  415. spin_lock(ptl);
  416. if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
  417. *ptlp = ptl;
  418. return pte;
  419. }
  420. pte_unmap_unlock(pte, ptl);
  421. return NULL;
  422. }
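/*
 * Typical caller pattern, mirroring page_referenced_one() and
 * page_mkclean_one() below: a non-NULL return comes back mapped and locked,
 * so the caller must drop it with pte_unmap_unlock() when done.
 *
 *	pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (!pte)
 *		goto out;
 *	... inspect or modify *pte under ptl ...
 *	pte_unmap_unlock(pte, ptl);
 */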
  423. /**
  424. * page_mapped_in_vma - check whether a page is really mapped in a VMA
  425. * @page: the page to test
  426. * @vma: the VMA to test
  427. *
  428. * Returns 1 if the page is mapped into the page tables of the VMA, 0
  429. * if the page is not mapped into the page tables of this VMA. Only
  430. * valid for normal file or anonymous VMAs.
  431. */
  432. int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  433. {
  434. unsigned long address;
  435. pte_t *pte;
  436. spinlock_t *ptl;
  437. address = vma_address(page, vma);
  438. if (address == -EFAULT) /* out of vma range */
  439. return 0;
  440. pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
  441. if (!pte) /* the page is not in this mm */
  442. return 0;
  443. pte_unmap_unlock(pte, ptl);
  444. return 1;
  445. }
  446. /*
  447. * Subfunctions of page_referenced: page_referenced_one called
  448. * repeatedly from either page_referenced_anon or page_referenced_file.
  449. */
  450. int page_referenced_one(struct page *page, struct vm_area_struct *vma,
  451. unsigned long address, unsigned int *mapcount,
  452. unsigned long *vm_flags)
  453. {
  454. struct mm_struct *mm = vma->vm_mm;
  455. int referenced = 0;
  456. if (unlikely(PageTransHuge(page))) {
  457. pmd_t *pmd;
  458. spin_lock(&mm->page_table_lock);
  459. /*
  460. * rmap might return false positives; we must filter
  461. * these out using page_check_address_pmd().
  462. */
  463. pmd = page_check_address_pmd(page, mm, address,
  464. PAGE_CHECK_ADDRESS_PMD_FLAG);
  465. if (!pmd) {
  466. spin_unlock(&mm->page_table_lock);
  467. goto out;
  468. }
  469. if (vma->vm_flags & VM_LOCKED) {
  470. spin_unlock(&mm->page_table_lock);
  471. *mapcount = 0; /* break early from loop */
  472. *vm_flags |= VM_LOCKED;
  473. goto out;
  474. }
  475. /* go ahead even if the pmd is pmd_trans_splitting() */
  476. if (pmdp_clear_flush_young_notify(vma, address, pmd))
  477. referenced++;
  478. spin_unlock(&mm->page_table_lock);
  479. } else {
  480. pte_t *pte;
  481. spinlock_t *ptl;
  482. /*
  483. * rmap might return false positives; we must filter
  484. * these out using page_check_address().
  485. */
  486. pte = page_check_address(page, mm, address, &ptl, 0);
  487. if (!pte)
  488. goto out;
  489. if (vma->vm_flags & VM_LOCKED) {
  490. pte_unmap_unlock(pte, ptl);
  491. *mapcount = 0; /* break early from loop */
  492. *vm_flags |= VM_LOCKED;
  493. goto out;
  494. }
  495. if (ptep_clear_flush_young_notify(vma, address, pte)) {
  496. /*
  497. * Don't treat a reference through a sequentially read
  498. * mapping as such. If the page has been used in
  499. * another mapping, we will catch it; if this other
  500. * mapping is already gone, the unmap path will have
  501. * set PG_referenced or activated the page.
  502. */
  503. if (likely(!VM_SequentialReadHint(vma)))
  504. referenced++;
  505. }
  506. pte_unmap_unlock(pte, ptl);
  507. }
  508. /* Pretend the page is referenced if the task has the
  509. swap token and is in the middle of a page fault. */
  510. if (mm != current->mm && has_swap_token(mm) &&
  511. rwsem_is_locked(&mm->mmap_sem))
  512. referenced++;
  513. (*mapcount)--;
  514. if (referenced)
  515. *vm_flags |= vma->vm_flags;
  516. out:
  517. return referenced;
  518. }
  519. static int page_referenced_anon(struct page *page,
  520. struct mem_cgroup *mem_cont,
  521. unsigned long *vm_flags)
  522. {
  523. unsigned int mapcount;
  524. struct anon_vma *anon_vma;
  525. struct anon_vma_chain *avc;
  526. int referenced = 0;
  527. anon_vma = page_lock_anon_vma(page);
  528. if (!anon_vma)
  529. return referenced;
  530. mapcount = page_mapcount(page);
  531. list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
  532. struct vm_area_struct *vma = avc->vma;
  533. unsigned long address = vma_address(page, vma);
  534. if (address == -EFAULT)
  535. continue;
  536. /*
  537. * If we are reclaiming on behalf of a cgroup, skip
  538. * counting on behalf of references from different
  539. * cgroups
  540. */
  541. if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
  542. continue;
  543. referenced += page_referenced_one(page, vma, address,
  544. &mapcount, vm_flags);
  545. if (!mapcount)
  546. break;
  547. }
  548. page_unlock_anon_vma(anon_vma);
  549. return referenced;
  550. }
  551. /**
  552. * page_referenced_file - referenced check for object-based rmap
  553. * @page: the page we're checking references on.
  554. * @mem_cont: target memory controller
  555. * @vm_flags: collect the vm_flags of vmas which actually referenced the page
  556. *
  557. * For an object-based mapped page, find all the places it is mapped and
  558. * check/clear the referenced flag. This is done by following the page->mapping
  559. * pointer, then walking the chain of vmas it holds. It returns the number
  560. * of references it found.
  561. *
  562. * This function is only called from page_referenced for object-based pages.
  563. */
  564. static int page_referenced_file(struct page *page,
  565. struct mem_cgroup *mem_cont,
  566. unsigned long *vm_flags)
  567. {
  568. unsigned int mapcount;
  569. struct address_space *mapping = page->mapping;
  570. pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  571. struct vm_area_struct *vma;
  572. struct prio_tree_iter iter;
  573. int referenced = 0;
  574. /*
  575. * The caller's checks on page->mapping and !PageAnon have made
  576. * sure that this is a file page: the check for page->mapping
  577. * excludes the case just before it gets set on an anon page.
  578. */
  579. BUG_ON(PageAnon(page));
  580. /*
  581. * The page lock not only makes sure that page->mapping cannot
  582. * suddenly be NULLified by truncation, it makes sure that the
  583. * structure at mapping cannot be freed and reused yet,
  584. * so we can safely take mapping->i_mmap_mutex.
  585. */
  586. BUG_ON(!PageLocked(page));
  587. mutex_lock(&mapping->i_mmap_mutex);
  588. /*
  589. * i_mmap_mutex does not stabilize mapcount at all, but mapcount
  590. * is more likely to be accurate if we note it after spinning.
  591. */
  592. mapcount = page_mapcount(page);
  593. vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
  594. unsigned long address = vma_address(page, vma);
  595. if (address == -EFAULT)
  596. continue;
  597. /*
  598. * If we are reclaiming on behalf of a cgroup, skip
  599. * counting on behalf of references from different
  600. * cgroups
  601. */
  602. if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
  603. continue;
  604. referenced += page_referenced_one(page, vma, address,
  605. &mapcount, vm_flags);
  606. if (!mapcount)
  607. break;
  608. }
  609. mutex_unlock(&mapping->i_mmap_mutex);
  610. return referenced;
  611. }
  612. /**
  613. * page_referenced - test if the page was referenced
  614. * @page: the page to test
  615. * @is_locked: caller holds lock on the page
  616. * @mem_cont: target memory controller
  617. * @vm_flags: collect the vm_flags of vmas which actually referenced the page
  618. *
  619. * Quick test_and_clear_referenced for all mappings to a page,
  620. * returns the number of ptes which referenced the page.
  621. */
  622. int page_referenced(struct page *page,
  623. int is_locked,
  624. struct mem_cgroup *mem_cont,
  625. unsigned long *vm_flags)
  626. {
  627. int referenced = 0;
  628. int we_locked = 0;
  629. *vm_flags = 0;
  630. if (page_mapped(page) && page_rmapping(page)) {
  631. if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
  632. we_locked = trylock_page(page);
  633. if (!we_locked) {
  634. referenced++;
  635. goto out;
  636. }
  637. }
  638. if (unlikely(PageKsm(page)))
  639. referenced += page_referenced_ksm(page, mem_cont,
  640. vm_flags);
  641. else if (PageAnon(page))
  642. referenced += page_referenced_anon(page, mem_cont,
  643. vm_flags);
  644. else if (page->mapping)
  645. referenced += page_referenced_file(page, mem_cont,
  646. vm_flags);
  647. if (we_locked)
  648. unlock_page(page);
  649. }
  650. out:
  651. if (page_test_and_clear_young(page_to_pfn(page)))
  652. referenced++;
  653. return referenced;
  654. }
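/*
 * A sketch of how reclaim is expected to consume the result, assuming a
 * shrink_page_list()-style caller in mm/vmscan.c (illustrative only):
 *
 *	referenced = page_referenced(page, is_locked, mem_cont, &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		... found in an mlocked vma: keep the page ...
 *	else if (referenced)
 *		... recently used: prefer activation over reclaim ...
 */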
  655. static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
  656. unsigned long address)
  657. {
  658. struct mm_struct *mm = vma->vm_mm;
  659. pte_t *pte;
  660. spinlock_t *ptl;
  661. int ret = 0;
  662. pte = page_check_address(page, mm, address, &ptl, 1);
  663. if (!pte)
  664. goto out;
  665. if (pte_dirty(*pte) || pte_write(*pte)) {
  666. pte_t entry;
  667. flush_cache_page(vma, address, pte_pfn(*pte));
  668. entry = ptep_clear_flush_notify(vma, address, pte);
  669. entry = pte_wrprotect(entry);
  670. entry = pte_mkclean(entry);
  671. set_pte_at(mm, address, pte, entry);
  672. ret = 1;
  673. }
  674. pte_unmap_unlock(pte, ptl);
  675. out:
  676. return ret;
  677. }
  678. static int page_mkclean_file(struct address_space *mapping, struct page *page)
  679. {
  680. pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  681. struct vm_area_struct *vma;
  682. struct prio_tree_iter iter;
  683. int ret = 0;
  684. BUG_ON(PageAnon(page));
  685. mutex_lock(&mapping->i_mmap_mutex);
  686. vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
  687. if (vma->vm_flags & VM_SHARED) {
  688. unsigned long address = vma_address(page, vma);
  689. if (address == -EFAULT)
  690. continue;
  691. ret += page_mkclean_one(page, vma, address);
  692. }
  693. }
  694. mutex_unlock(&mapping->i_mmap_mutex);
  695. return ret;
  696. }
  697. int page_mkclean(struct page *page)
  698. {
  699. int ret = 0;
  700. BUG_ON(!PageLocked(page));
  701. if (page_mapped(page)) {
  702. struct address_space *mapping = page_mapping(page);
  703. if (mapping) {
  704. ret = page_mkclean_file(mapping, page);
  705. if (page_test_and_clear_dirty(page_to_pfn(page), 1))
  706. ret = 1;
  707. }
  708. }
  709. return ret;
  710. }
  711. EXPORT_SYMBOL_GPL(page_mkclean);
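/*
 * A sketch of the writeback-side usage, assuming a clear_page_dirty_for_io()-
 * style caller in mm/page-writeback.c (illustrative only): before writing a
 * shared-mmap'ed page, its ptes are write-protected and cleaned, and any
 * dirty pte found is folded back into the page's dirty state:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */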
  712. /**
  713. * page_move_anon_rmap - move a page to our anon_vma
  714. * @page: the page to move to our anon_vma
  715. * @vma: the vma the page belongs to
  716. * @address: the user virtual address mapped
  717. *
  718. * When a page belongs exclusively to one process after a COW event,
  719. * that page can be moved into the anon_vma that belongs to just that
  720. * process, so the rmap code will not search the parent or sibling
  721. * processes.
  722. */
  723. void page_move_anon_rmap(struct page *page,
  724. struct vm_area_struct *vma, unsigned long address)
  725. {
  726. struct anon_vma *anon_vma = vma->anon_vma;
  727. VM_BUG_ON(!PageLocked(page));
  728. VM_BUG_ON(!anon_vma);
  729. VM_BUG_ON(page->index != linear_page_index(vma, address));
  730. anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
  731. page->mapping = (struct address_space *) anon_vma;
  732. }
  733. /**
  734. * __page_set_anon_rmap - set up new anonymous rmap
  735. * @page: Page to add to rmap
  736. * @vma: VM area to add page to.
  737. * @address: User virtual address of the mapping
  738. * @exclusive: the page is exclusively owned by the current process
  739. */
  740. static void __page_set_anon_rmap(struct page *page,
  741. struct vm_area_struct *vma, unsigned long address, int exclusive)
  742. {
  743. struct anon_vma *anon_vma = vma->anon_vma;
  744. BUG_ON(!anon_vma);
  745. if (PageAnon(page))
  746. return;
  747. /*
  748. * If the page isn't exclusively mapped into this vma,
  749. * we must use the _oldest_ possible anon_vma for the
  750. * page mapping!
  751. */
  752. if (!exclusive)
  753. anon_vma = anon_vma->root;
  754. anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
  755. page->mapping = (struct address_space *) anon_vma;
  756. page->index = linear_page_index(vma, address);
  757. }
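/*
 * The tagging above relies on the anon/file distinction living in the low
 * bits of page->mapping: anonymous pages store anon_vma + PAGE_MAPPING_ANON
 * there. A sketch of the reverse lookup, mirroring the decoding already done
 * in page_lock_anon_vma() above:
 *
 *	unsigned long mapping = (unsigned long)page->mapping;
 *	if ((mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
 */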
  758. /**
  759. * __page_check_anon_rmap - sanity check anonymous rmap addition
  760. * @page: the page to add the mapping to
  761. * @vma: the vm area in which the mapping is added
  762. * @address: the user virtual address mapped
  763. */
  764. static void __page_check_anon_rmap(struct page *page,
  765. struct vm_area_struct *vma, unsigned long address)
  766. {
  767. #ifdef CONFIG_DEBUG_VM
  768. /*
  769. * The page's anon-rmap details (mapping and index) are guaranteed to
  770. * be set up correctly at this point.
  771. *
  772. * We have exclusion against page_add_anon_rmap because the caller
  773. * always holds the page locked, except if called from page_dup_rmap,
  774. * in which case the page is already known to be set up.
  775. *
  776. * We have exclusion against page_add_new_anon_rmap because those pages
  777. * are initially only visible via the pagetables, and the pte is locked
  778. * over the call to page_add_new_anon_rmap.
  779. */
  780. BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
  781. BUG_ON(page->index != linear_page_index(vma, address));
  782. #endif
  783. }
  784. /**
  785. * page_add_anon_rmap - add pte mapping to an anonymous page
  786. * @page: the page to add the mapping to
  787. * @vma: the vm area in which the mapping is added
  788. * @address: the user virtual address mapped
  789. *
  790. * The caller needs to hold the pte lock, and the page must be locked in
  791. * the anon_vma case: to serialize mapping,index checking after setting,
  792. * and to ensure that PageAnon is not being upgraded racily to PageKsm
  793. * (but PageKsm is never downgraded to PageAnon).
  794. */
  795. void page_add_anon_rmap(struct page *page,
  796. struct vm_area_struct *vma, unsigned long address)
  797. {
  798. do_page_add_anon_rmap(page, vma, address, 0);
  799. }
  800. /*
  801. * Special version of the above for do_swap_page, which often runs
  802. * into pages that are exclusively owned by the current process.
  803. * Everybody else should continue to use page_add_anon_rmap above.
  804. */
  805. void do_page_add_anon_rmap(struct page *page,
  806. struct vm_area_struct *vma, unsigned long address, int exclusive)
  807. {
  808. int first = atomic_inc_and_test(&page->_mapcount);
  809. if (first) {
  810. if (!PageTransHuge(page))
  811. __inc_zone_page_state(page, NR_ANON_PAGES);
  812. else
  813. __inc_zone_page_state(page,
  814. NR_ANON_TRANSPARENT_HUGEPAGES);
  815. }
  816. if (unlikely(PageKsm(page)))
  817. return;
  818. VM_BUG_ON(!PageLocked(page));
  819. VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
  820. if (first)
  821. __page_set_anon_rmap(page, vma, address, exclusive);
  822. else
  823. __page_check_anon_rmap(page, vma, address);
  824. }
  825. /**
  826. * page_add_new_anon_rmap - add pte mapping to a new anonymous page
  827. * @page: the page to add the mapping to
  828. * @vma: the vm area in which the mapping is added
  829. * @address: the user virtual address mapped
  830. *
  831. * Same as page_add_anon_rmap but must only be called on *new* pages.
  832. * This means the inc-and-test can be bypassed.
  833. * Page does not have to be locked.
  834. */
  835. void page_add_new_anon_rmap(struct page *page,
  836. struct vm_area_struct *vma, unsigned long address)
  837. {
  838. VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
  839. SetPageSwapBacked(page);
  840. atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
  841. if (!PageTransHuge(page))
  842. __inc_zone_page_state(page, NR_ANON_PAGES);
  843. else
  844. __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
  845. __page_set_anon_rmap(page, vma, address, 1);
  846. if (page_evictable(page, vma))
  847. lru_cache_add_lru(page, LRU_ACTIVE_ANON);
  848. else
  849. add_page_to_unevictable_list(page);
  850. }
  851. /**
  852. * page_add_file_rmap - add pte mapping to a file page
  853. * @page: the page to add the mapping to
  854. *
  855. * The caller needs to hold the pte lock.
  856. */
  857. void page_add_file_rmap(struct page *page)
  858. {
  859. if (atomic_inc_and_test(&page->_mapcount)) {
  860. __inc_zone_page_state(page, NR_FILE_MAPPED);
  861. mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
  862. }
  863. }
  864. /**
  865. * page_remove_rmap - take down pte mapping from a page
  866. * @page: page to remove mapping from
  867. *
  868. * The caller needs to hold the pte lock.
  869. */
  870. void page_remove_rmap(struct page *page)
  871. {
  872. /* page still mapped by someone else? */
  873. if (!atomic_add_negative(-1, &page->_mapcount))
  874. return;
  875. /*
  876. * Now that the last pte has gone, s390 must transfer dirty
  877. * flag from storage key to struct page. We can usually skip
  878. * this if the page is anon, so about to be freed; but perhaps
  879. * not if it's in swapcache - there might be another pte slot
  880. * containing the swap entry, but page not yet written to swap.
  881. */
  882. if ((!PageAnon(page) || PageSwapCache(page)) &&
  883. page_test_and_clear_dirty(page_to_pfn(page), 1))
  884. set_page_dirty(page);
  885. /*
  886. * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
  887. * and not charged by memcg for now.
  888. */
  889. if (unlikely(PageHuge(page)))
  890. return;
  891. if (PageAnon(page)) {
  892. mem_cgroup_uncharge_page(page);
  893. if (!PageTransHuge(page))
  894. __dec_zone_page_state(page, NR_ANON_PAGES);
  895. else
  896. __dec_zone_page_state(page,
  897. NR_ANON_TRANSPARENT_HUGEPAGES);
  898. } else {
  899. __dec_zone_page_state(page, NR_FILE_MAPPED);
  900. mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
  901. }
  902. /*
  903. * It would be tidy to reset the PageAnon mapping here,
  904. * but that might overwrite a racing page_add_anon_rmap
  905. * which increments mapcount after us but sets mapping
  906. * before us: so leave the reset to free_hot_cold_page,
  907. * and remember that it's only reliable while mapped.
  908. * Leaving it set also helps swapoff to reinstate ptes
  909. * faster for those pages still in swapcache.
  910. */
  911. }
  912. /*
  913. * Subfunctions of try_to_unmap: try_to_unmap_one called
  914. * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  915. */
  916. int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
  917. unsigned long address, enum ttu_flags flags)
  918. {
  919. struct mm_struct *mm = vma->vm_mm;
  920. pte_t *pte;
  921. pte_t pteval;
  922. spinlock_t *ptl;
  923. int ret = SWAP_AGAIN;
  924. pte = page_check_address(page, mm, address, &ptl, 0);
  925. if (!pte)
  926. goto out;
  927. /*
  928. * If the page is mlock()d, we cannot swap it out.
  929. * If it's recently referenced (perhaps page_referenced
  930. * skipped over this mm) then we should reactivate it.
  931. */
  932. if (!(flags & TTU_IGNORE_MLOCK)) {
  933. if (vma->vm_flags & VM_LOCKED)
  934. goto out_mlock;
  935. if (TTU_ACTION(flags) == TTU_MUNLOCK)
  936. goto out_unmap;
  937. }
  938. if (!(flags & TTU_IGNORE_ACCESS)) {
  939. if (ptep_clear_flush_young_notify(vma, address, pte)) {
  940. ret = SWAP_FAIL;
  941. goto out_unmap;
  942. }
  943. }
  944. /* Nuke the page table entry. */
  945. flush_cache_page(vma, address, page_to_pfn(page));
  946. pteval = ptep_clear_flush_notify(vma, address, pte);
  947. /* Move the dirty bit to the physical page now the pte is gone. */
  948. if (pte_dirty(pteval))
  949. set_page_dirty(page);
  950. /* Update high watermark before we lower rss */
  951. update_hiwater_rss(mm);
  952. if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
  953. if (PageAnon(page))
  954. dec_mm_counter(mm, MM_ANONPAGES);
  955. else
  956. dec_mm_counter(mm, MM_FILEPAGES);
  957. set_pte_at(mm, address, pte,
  958. swp_entry_to_pte(make_hwpoison_entry(page)));
  959. } else if (PageAnon(page)) {
  960. swp_entry_t entry = { .val = page_private(page) };
  961. if (PageSwapCache(page)) {
  962. /*
  963. * Store the swap location in the pte.
  964. * See handle_pte_fault() ...
  965. */
  966. if (swap_duplicate(entry) < 0) {
  967. set_pte_at(mm, address, pte, pteval);
  968. ret = SWAP_FAIL;
  969. goto out_unmap;
  970. }
  971. if (list_empty(&mm->mmlist)) {
  972. spin_lock(&mmlist_lock);
  973. if (list_empty(&mm->mmlist))
  974. list_add(&mm->mmlist, &init_mm.mmlist);
  975. spin_unlock(&mmlist_lock);
  976. }
  977. dec_mm_counter(mm, MM_ANONPAGES);
  978. inc_mm_counter(mm, MM_SWAPENTS);
  979. } else if (PAGE_MIGRATION) {
  980. /*
  981. * Store the pfn of the page in a special migration
  982. * pte. do_swap_page() will wait until the migration
  983. * pte is removed and then restart fault handling.
  984. */
  985. BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
  986. entry = make_migration_entry(page, pte_write(pteval));
  987. }
  988. set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
  989. BUG_ON(pte_file(*pte));
  990. } else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
  991. /* Establish migration entry for a file page */
  992. swp_entry_t entry;
  993. entry = make_migration_entry(page, pte_write(pteval));
  994. set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
  995. } else
  996. dec_mm_counter(mm, MM_FILEPAGES);
  997. page_remove_rmap(page);
  998. page_cache_release(page);
  999. out_unmap:
  1000. pte_unmap_unlock(pte, ptl);
  1001. out:
  1002. return ret;
  1003. out_mlock:
  1004. pte_unmap_unlock(pte, ptl);
  1005. /*
  1006. * We need mmap_sem locking; otherwise the VM_LOCKED check is racy
  1007. * and gives an unstable result. Also, we can't wait here because
  1008. * we now hold anon_vma->lock or mapping->i_mmap_mutex.
  1009. * If the trylock fails, the page remains on the evictable lru and
  1010. * vmscan may later retry moving it to the unevictable lru if the
  1011. * page is actually mlocked.
  1012. */
  1013. if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
  1014. if (vma->vm_flags & VM_LOCKED) {
  1015. mlock_vma_page(page);
  1016. ret = SWAP_MLOCK;
  1017. }
  1018. up_read(&vma->vm_mm->mmap_sem);
  1019. }
  1020. return ret;
  1021. }
  1022. /*
  1023. * objrmap doesn't work for nonlinear VMAs because the assumption that
  1024. * offset-into-file correlates with offset-into-virtual-addresses does not hold.
  1025. * Consequently, given a particular page and its ->index, we cannot locate the
  1026. * ptes which are mapping that page without an exhaustive linear search.
  1027. *
  1028. * So what this code does is a mini "virtual scan" of each nonlinear VMA which
  1029. * maps the file to which the target page belongs. The ->vm_private_data field
  1030. * holds the current cursor into that scan. Successive searches will circulate
  1031. * around the vma's virtual address space.
  1032. *
  1033. * So as more replacement pressure is applied to the pages in a nonlinear VMA,
  1034. * more scanning pressure is placed against them as well. Eventually pages
  1035. * will become fully unmapped and are eligible for eviction.
  1036. *
  1037. * For very sparsely populated VMAs this is a little inefficient - chances are
  1038. * there won't be many ptes located within the scan cluster. In this case
  1039. * maybe we could scan further - to the end of the pte page, perhaps.
  1040. *
  1041. * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
  1042. * acquire it without blocking. If vma locked, mlock the pages in the cluster,
  1043. * rather than unmapping them. If we encounter the "check_page" that vmscan is
  1044. * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
  1045. */
  1046. #define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE)
  1047. #define CLUSTER_MASK (~(CLUSTER_SIZE - 1))
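/*
 * Worked example of the cluster arithmetic, assuming 4K pages and a 2M PMD
 * (x86_64-style): CLUSTER_SIZE = min(32 * 4096, 2M) = 128K, so CLUSTER_MASK
 * clears the low 17 bits and try_to_unmap_cluster() below scans one
 * 128K-aligned window of 32 ptes per call, advancing the per-vma cursor
 * stored in vma->vm_private_data by CLUSTER_SIZE each time.
 */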
  1048. static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
  1049. struct vm_area_struct *vma, struct page *check_page)
  1050. {
  1051. struct mm_struct *mm = vma->vm_mm;
  1052. pgd_t *pgd;
  1053. pud_t *pud;
  1054. pmd_t *pmd;
  1055. pte_t *pte;
  1056. pte_t pteval;
  1057. spinlock_t *ptl;
  1058. struct page *page;
  1059. unsigned long address;
  1060. unsigned long end;
  1061. int ret = SWAP_AGAIN;
  1062. int locked_vma = 0;
  1063. address = (vma->vm_start + cursor) & CLUSTER_MASK;
  1064. end = address + CLUSTER_SIZE;
  1065. if (address < vma->vm_start)
  1066. address = vma->vm_start;
  1067. if (end > vma->vm_end)
  1068. end = vma->vm_end;
  1069. pgd = pgd_offset(mm, address);
  1070. if (!pgd_present(*pgd))
  1071. return ret;
  1072. pud = pud_offset(pgd, address);
  1073. if (!pud_present(*pud))
  1074. return ret;
  1075. pmd = pmd_offset(pud, address);
  1076. if (!pmd_present(*pmd))
  1077. return ret;
  1078. /*
  1079. * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
  1080. * keep the sem while scanning the cluster for mlocking pages.
  1081. */
  1082. if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
  1083. locked_vma = (vma->vm_flags & VM_LOCKED);
  1084. if (!locked_vma)
  1085. up_read(&vma->vm_mm->mmap_sem); /* don't need it */
  1086. }
  1087. pte = pte_offset_map_lock(mm, pmd, address, &ptl);
  1088. /* Update high watermark before we lower rss */
  1089. update_hiwater_rss(mm);
  1090. for (; address < end; pte++, address += PAGE_SIZE) {
  1091. if (!pte_present(*pte))
  1092. continue;
  1093. page = vm_normal_page(vma, address, *pte);
  1094. BUG_ON(!page || PageAnon(page));
  1095. if (locked_vma) {
  1096. mlock_vma_page(page); /* no-op if already mlocked */
  1097. if (page == check_page)
  1098. ret = SWAP_MLOCK;
  1099. continue; /* don't unmap */
  1100. }
  1101. if (ptep_clear_flush_young_notify(vma, address, pte))
  1102. continue;
  1103. /* Nuke the page table entry. */
  1104. flush_cache_page(vma, address, pte_pfn(*pte));
  1105. pteval = ptep_clear_flush_notify(vma, address, pte);
  1106. /* If nonlinear, store the file page offset in the pte. */
  1107. if (page->index != linear_page_index(vma, address))
  1108. set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
  1109. /* Move the dirty bit to the physical page now the pte is gone. */
  1110. if (pte_dirty(pteval))
  1111. set_page_dirty(page);
  1112. page_remove_rmap(page);
  1113. page_cache_release(page);
  1114. dec_mm_counter(mm, MM_FILEPAGES);
  1115. (*mapcount)--;
  1116. }
  1117. pte_unmap_unlock(pte - 1, ptl);
  1118. if (locked_vma)
  1119. up_read(&vma->vm_mm->mmap_sem);
  1120. return ret;
  1121. }
  1122. bool is_vma_temporary_stack(struct vm_area_struct *vma)
  1123. {
  1124. int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
  1125. if (!maybe_stack)
  1126. return false;
  1127. if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
  1128. VM_STACK_INCOMPLETE_SETUP)
  1129. return true;
  1130. return false;
  1131. }
  1132. /**
  1133. * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  1134. * rmap method
  1135. * @page: the page to unmap/unlock
  1136. * @flags: action and flags
  1137. *
  1138. * Find all the mappings of a page using the mapping pointer and the vma chains
  1139. * contained in the anon_vma struct it points to.
  1140. *
  1141. * This function is only called from try_to_unmap/try_to_munlock for
  1142. * anonymous pages.
  1143. * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
  1144. * where the page was found will be held for write. So, we won't recheck
  1145. * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  1146. * VM_LOCKED.
  1147. */
  1148. static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
  1149. {
  1150. struct anon_vma *anon_vma;
  1151. struct anon_vma_chain *avc;
  1152. int ret = SWAP_AGAIN;
  1153. anon_vma = page_lock_anon_vma(page);
  1154. if (!anon_vma)
  1155. return ret;
  1156. list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
  1157. struct vm_area_struct *vma = avc->vma;
  1158. unsigned long address;
  1159. /*
  1160. * During exec, a temporary VMA is set up and later moved.
  1161. * The VMA is moved under the anon_vma lock but not the
  1162. * page tables leading to a race where migration cannot
  1163. * find the migration ptes. Rather than increasing the
  1164. * locking requirements of exec(), migration skips
  1165. * temporary VMAs until after exec() completes.
  1166. */
  1167. if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
  1168. is_vma_temporary_stack(vma))
  1169. continue;
  1170. address = vma_address(page, vma);
  1171. if (address == -EFAULT)
  1172. continue;
  1173. ret = try_to_unmap_one(page, vma, address, flags);
  1174. if (ret != SWAP_AGAIN || !page_mapped(page))
  1175. break;
  1176. }
  1177. page_unlock_anon_vma(anon_vma);
  1178. return ret;
  1179. }
  1180. /**
  1181. * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
  1182. * @page: the page to unmap/unlock
  1183. * @flags: action and flags
  1184. *
  1185. * Find all the mappings of a page using the mapping pointer and the vma chains
  1186. * contained in the address_space struct it points to.
  1187. *
  1188. * This function is only called from try_to_unmap/try_to_munlock for
  1189. * object-based pages.
  1190. * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
  1191. * where the page was found will be held for write. So, we won't recheck
  1192. * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  1193. * VM_LOCKED.
  1194. */
  1195. static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
  1196. {
  1197. struct address_space *mapping = page->mapping;
  1198. pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  1199. struct vm_area_struct *vma;
  1200. struct prio_tree_iter iter;
  1201. int ret = SWAP_AGAIN;
  1202. unsigned long cursor;
  1203. unsigned long max_nl_cursor = 0;
  1204. unsigned long max_nl_size = 0;
  1205. unsigned int mapcount;
  1206. mutex_lock(&mapping->i_mmap_mutex);
  1207. vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
  1208. unsigned long address = vma_address(page, vma);
  1209. if (address == -EFAULT)
  1210. continue;
  1211. ret = try_to_unmap_one(page, vma, address, flags);
  1212. if (ret != SWAP_AGAIN || !page_mapped(page))
  1213. goto out;
  1214. }
  1215. if (list_empty(&mapping->i_mmap_nonlinear))
  1216. goto out;
  1217. /*
  1218. * We don't bother to try to find the munlocked page in nonlinears.
  1219. * It's costly. Instead, later, page reclaim logic may call
  1220. * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
  1221. */
  1222. if (TTU_ACTION(flags) == TTU_MUNLOCK)
  1223. goto out;
  1224. list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
  1225. shared.vm_set.list) {
  1226. cursor = (unsigned long) vma->vm_private_data;
  1227. if (cursor > max_nl_cursor)
  1228. max_nl_cursor = cursor;
  1229. cursor = vma->vm_end - vma->vm_start;
  1230. if (cursor > max_nl_size)
  1231. max_nl_size = cursor;
  1232. }
  1233. if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */
  1234. ret = SWAP_FAIL;
  1235. goto out;
  1236. }
  1237. /*
  1238. * We don't try to search for this page in the nonlinear vmas,
  1239. * and page_referenced wouldn't have found it anyway. Instead
  1240. * just walk the nonlinear vmas trying to age and unmap some.
  1241. * The mapcount of the page we came in with is irrelevant,
  1242. * but even so use it as a guide to how hard we should try?
  1243. */
  1244. mapcount = page_mapcount(page);
  1245. if (!mapcount)
  1246. goto out;
  1247. cond_resched();
  1248. max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
  1249. if (max_nl_cursor == 0)
  1250. max_nl_cursor = CLUSTER_SIZE;
  1251. do {
  1252. list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
  1253. shared.vm_set.list) {
  1254. cursor = (unsigned long) vma->vm_private_data;
  1255. while ( cursor < max_nl_cursor &&
  1256. cursor < vma->vm_end - vma->vm_start) {
  1257. if (try_to_unmap_cluster(cursor, &mapcount,
  1258. vma, page) == SWAP_MLOCK)
  1259. ret = SWAP_MLOCK;
  1260. cursor += CLUSTER_SIZE;
  1261. vma->vm_private_data = (void *) cursor;
  1262. if ((int)mapcount <= 0)
  1263. goto out;
  1264. }
  1265. vma->vm_private_data = (void *) max_nl_cursor;
  1266. }
  1267. cond_resched();
  1268. max_nl_cursor += CLUSTER_SIZE;
  1269. } while (max_nl_cursor <= max_nl_size);
  1270. /*
  1271. * Don't loop forever (perhaps all the remaining pages are
  1272. * in locked vmas). Reset cursor on all unreserved nonlinear
  1273. * vmas, now forgetting on which ones it had fallen behind.
  1274. */
  1275. list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
  1276. vma->vm_private_data = NULL;
  1277. out:
  1278. mutex_unlock(&mapping->i_mmap_mutex);
  1279. return ret;
  1280. }
  1281. /**
  1282. * try_to_unmap - try to remove all page table mappings to a page
  1283. * @page: the page to get unmapped
  1284. * @flags: action and flags
  1285. *
  1286. * Tries to remove all the page table entries which are mapping this
  1287. * page, used in the pageout path. Caller must hold the page lock.
  1288. * Return values are:
  1289. *
  1290. * SWAP_SUCCESS - we succeeded in removing all mappings
  1291. * SWAP_AGAIN - we missed a mapping, try again later
  1292. * SWAP_FAIL - the page is unswappable
  1293. * SWAP_MLOCK - page is mlocked.
  1294. */
  1295. int try_to_unmap(struct page *page, enum ttu_flags flags)
  1296. {
  1297. int ret;
  1298. BUG_ON(!PageLocked(page));
  1299. VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
  1300. if (unlikely(PageKsm(page)))
  1301. ret = try_to_unmap_ksm(page, flags);
  1302. else if (PageAnon(page))
  1303. ret = try_to_unmap_anon(page, flags);
  1304. else
  1305. ret = try_to_unmap_file(page, flags);
  1306. if (ret != SWAP_MLOCK && !page_mapped(page))
  1307. ret = SWAP_SUCCESS;
  1308. return ret;
  1309. }
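/*
 * A sketch of the pageout-path usage described above, assuming a
 * shrink_page_list()-style caller (illustrative only):
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_FAIL:
 *		... cannot be unmapped: activate the page ...
 *	case SWAP_AGAIN:
 *		... a mapping was missed: keep the page and retry later ...
 *	case SWAP_MLOCK:
 *		... mlocked: move the page to the unevictable list ...
 *	case SWAP_SUCCESS:
 *		... all ptes removed: proceed to pageout or free ...
 *	}
 *
 * The page lock must be held by the caller, as required above.
 */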
  1310. /**
  1311. * try_to_munlock - try to munlock a page
  1312. * @page: the page to be munlocked
  1313. *
  1314. * Called from munlock code. Checks all of the VMAs mapping the page
  1315. * to make sure nobody else has this page mlocked. The page will be
  1316. * returned with PG_mlocked cleared if no other vmas have it mlocked.
  1317. *
  1318. * Return values are:
  1319. *
  1320. * SWAP_AGAIN - no vma is holding page mlocked, or,
  1321. * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
  1322. * SWAP_FAIL - page cannot be located at present
  1323. * SWAP_MLOCK - page is now mlocked.
  1324. */
  1325. int try_to_munlock(struct page *page)
  1326. {
  1327. VM_BUG_ON(!PageLocked(page) || PageLRU(page));
  1328. if (unlikely(PageKsm(page)))
  1329. return try_to_unmap_ksm(page, TTU_MUNLOCK);
  1330. else if (PageAnon(page))
  1331. return try_to_unmap_anon(page, TTU_MUNLOCK);
  1332. else
  1333. return try_to_unmap_file(page, TTU_MUNLOCK);
  1334. }
  1335. void __put_anon_vma(struct anon_vma *anon_vma)
  1336. {
  1337. struct anon_vma *root = anon_vma->root;
  1338. if (root != anon_vma && atomic_dec_and_test(&root->refcount))
  1339. anon_vma_free(root);
  1340. anon_vma_free(anon_vma);
  1341. }
  1342. #ifdef CONFIG_MIGRATION
  1343. /*
  1344. * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
  1345. * Called by migrate.c to remove migration ptes, but might be used more later.
  1346. */
  1347. static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
  1348. struct vm_area_struct *, unsigned long, void *), void *arg)
  1349. {
  1350. struct anon_vma *anon_vma;
  1351. struct anon_vma_chain *avc;
  1352. int ret = SWAP_AGAIN;
  1353. /*
  1354. * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
  1355. * because that depends on page_mapped(); but not all its usages
  1356. * are holding mmap_sem. Users without mmap_sem are required to
  1357. * take a reference count to prevent the anon_vma disappearing
  1358. */
  1359. anon_vma = page_anon_vma(page);
  1360. if (!anon_vma)
  1361. return ret;
  1362. anon_vma_lock(anon_vma);
  1363. list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
  1364. struct vm_area_struct *vma = avc->vma;
  1365. unsigned long address = vma_address(page, vma);
  1366. if (address == -EFAULT)
  1367. continue;
  1368. ret = rmap_one(page, vma, address, arg);
  1369. if (ret != SWAP_AGAIN)
  1370. break;
  1371. }
  1372. anon_vma_unlock(anon_vma);
  1373. return ret;
  1374. }
  1375. static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
  1376. struct vm_area_struct *, unsigned long, void *), void *arg)
  1377. {
  1378. struct address_space *mapping = page->mapping;
  1379. pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  1380. struct vm_area_struct *vma;
  1381. struct prio_tree_iter iter;
  1382. int ret = SWAP_AGAIN;
  1383. if (!mapping)
  1384. return ret;
  1385. mutex_lock(&mapping->i_mmap_mutex);
  1386. vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
  1387. unsigned long address = vma_address(page, vma);
  1388. if (address == -EFAULT)
  1389. continue;
  1390. ret = rmap_one(page, vma, address, arg);
  1391. if (ret != SWAP_AGAIN)
  1392. break;
  1393. }
  1394. /*
  1395. * No nonlinear handling: being always shared, nonlinear vmas
  1396. * never contain migration ptes. Decide what to do about this
  1397. * limitation to linear when we need rmap_walk() on nonlinear.
  1398. */
  1399. mutex_unlock(&mapping->i_mmap_mutex);
  1400. return ret;
  1401. }
  1402. int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
  1403. struct vm_area_struct *, unsigned long, void *), void *arg)
  1404. {
  1405. VM_BUG_ON(!PageLocked(page));
  1406. if (unlikely(PageKsm(page)))
  1407. return rmap_walk_ksm(page, rmap_one, arg);
  1408. else if (PageAnon(page))
  1409. return rmap_walk_anon(page, rmap_one, arg);
  1410. else
  1411. return rmap_walk_file(page, rmap_one, arg);
  1412. }
  1413. #endif /* CONFIG_MIGRATION */
  1414. #ifdef CONFIG_HUGETLB_PAGE
  1415. /*
  1416. * The following three functions are for anonymous (private mapped) hugepages.
  1417. * Unlike common anonymous pages, anonymous hugepages have no accounting code
  1418. * and no lru code, because we handle hugepages differently from common pages.
  1419. */
  1420. static void __hugepage_set_anon_rmap(struct page *page,
  1421. struct vm_area_struct *vma, unsigned long address, int exclusive)
  1422. {
  1423. struct anon_vma *anon_vma = vma->anon_vma;
  1424. BUG_ON(!anon_vma);
  1425. if (PageAnon(page))
  1426. return;
  1427. if (!exclusive)
  1428. anon_vma = anon_vma->root;
  1429. anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
  1430. page->mapping = (struct address_space *) anon_vma;
  1431. page->index = linear_page_index(vma, address);
  1432. }
  1433. void hugepage_add_anon_rmap(struct page *page,
  1434. struct vm_area_struct *vma, unsigned long address)
  1435. {
  1436. struct anon_vma *anon_vma = vma->anon_vma;
  1437. int first;
  1438. BUG_ON(!PageLocked(page));
  1439. BUG_ON(!anon_vma);
  1440. BUG_ON(address < vma->vm_start || address >= vma->vm_end);
  1441. first = atomic_inc_and_test(&page->_mapcount);
  1442. if (first)
  1443. __hugepage_set_anon_rmap(page, vma, address, 0);
  1444. }
  1445. void hugepage_add_new_anon_rmap(struct page *page,
  1446. struct vm_area_struct *vma, unsigned long address)
  1447. {
  1448. BUG_ON(address < vma->vm_start || address >= vma->vm_end);
  1449. atomic_set(&page->_mapcount, 0);
  1450. __hugepage_set_anon_rmap(page, vma, address, 1);
  1451. }
  1452. #endif /* CONFIG_HUGETLB_PAGE */