vmscan.c

  1. /*
  2. * linux/mm/vmscan.c
  3. *
  4. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  5. *
  6. * Swap reorganised 29.12.95, Stephen Tweedie.
  7. * kswapd added: 7.1.96 sct
  8. * Removed kswapd_ctl limits, and swap out as many pages as needed
  9. * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
  10. * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
  11. * Multiqueue VM started 5.8.00, Rik van Riel.
  12. */
  13. #include <linux/mm.h>
  14. #include <linux/module.h>
  15. #include <linux/slab.h>
  16. #include <linux/kernel_stat.h>
  17. #include <linux/swap.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/init.h>
  20. #include <linux/highmem.h>
  21. #include <linux/vmstat.h>
  22. #include <linux/file.h>
  23. #include <linux/writeback.h>
  24. #include <linux/blkdev.h>
  25. #include <linux/buffer_head.h> /* for try_to_release_page(),
  26. buffer_heads_over_limit */
  27. #include <linux/mm_inline.h>
  28. #include <linux/pagevec.h>
  29. #include <linux/backing-dev.h>
  30. #include <linux/rmap.h>
  31. #include <linux/topology.h>
  32. #include <linux/cpu.h>
  33. #include <linux/cpuset.h>
  34. #include <linux/notifier.h>
  35. #include <linux/rwsem.h>
  36. #include <linux/delay.h>
  37. #include <linux/kthread.h>
  38. #include <linux/freezer.h>
  39. #include <linux/memcontrol.h>
  40. #include <linux/delayacct.h>
  41. #include <linux/sysctl.h>
  42. #include <asm/tlbflush.h>
  43. #include <asm/div64.h>
  44. #include <linux/swapops.h>
  45. #include "internal.h"
  46. struct scan_control {
  47. /* Incremented by the number of inactive pages that were scanned */
  48. unsigned long nr_scanned;
  49. /* This context's GFP mask */
  50. gfp_t gfp_mask;
  51. int may_writepage;
  52. /* Can pages be swapped as part of reclaim? */
  53. int may_swap;
  54. /* This context's SWAP_CLUSTER_MAX. If freeing memory for
  55. * suspend, we effectively ignore SWAP_CLUSTER_MAX.
  56. * In this context, it doesn't matter that we scan the
  57. * whole list at once. */
  58. int swap_cluster_max;
  59. int swappiness;
  60. int all_unreclaimable;
  61. int order;
  62. /* Which cgroup do we reclaim from */
  63. struct mem_cgroup *mem_cgroup;
  64. /* Pluggable isolate pages callback */
  65. unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
  66. unsigned long *scanned, int order, int mode,
  67. struct zone *z, struct mem_cgroup *mem_cont,
  68. int active, int file);
  69. };
  70. #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
  71. #ifdef ARCH_HAS_PREFETCH
  72. #define prefetch_prev_lru_page(_page, _base, _field) \
  73. do { \
  74. if ((_page)->lru.prev != _base) { \
  75. struct page *prev; \
  76. \
  77. prev = lru_to_page(&(_page->lru)); \
  78. prefetch(&prev->_field); \
  79. } \
  80. } while (0)
  81. #else
  82. #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
  83. #endif
  84. #ifdef ARCH_HAS_PREFETCHW
  85. #define prefetchw_prev_lru_page(_page, _base, _field) \
  86. do { \
  87. if ((_page)->lru.prev != _base) { \
  88. struct page *prev; \
  89. \
  90. prev = lru_to_page(&(_page->lru)); \
  91. prefetchw(&prev->_field); \
  92. } \
  93. } while (0)
  94. #else
  95. #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
  96. #endif
  97. /*
  98. * From 0 .. 100. Higher means more swappy.
  99. */
  100. int vm_swappiness = 60;
  101. long vm_total_pages; /* The total number of pages which the VM controls */
  102. static LIST_HEAD(shrinker_list);
  103. static DECLARE_RWSEM(shrinker_rwsem);
  104. #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  105. #define scan_global_lru(sc) (!(sc)->mem_cgroup)
  106. #else
  107. #define scan_global_lru(sc) (1)
  108. #endif
  109. /*
  110. * Add a shrinker callback to be called from the vm
  111. */
  112. void register_shrinker(struct shrinker *shrinker)
  113. {
  114. shrinker->nr = 0;
  115. down_write(&shrinker_rwsem);
  116. list_add_tail(&shrinker->list, &shrinker_list);
  117. up_write(&shrinker_rwsem);
  118. }
  119. EXPORT_SYMBOL(register_shrinker);
  120. /*
  121. * Remove one
  122. */
  123. void unregister_shrinker(struct shrinker *shrinker)
  124. {
  125. down_write(&shrinker_rwsem);
  126. list_del(&shrinker->list);
  127. up_write(&shrinker_rwsem);
  128. }
  129. EXPORT_SYMBOL(unregister_shrinker);
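/*
 * A minimal sketch (not part of vmscan.c) of how a cache owner hooks into
 * the aging loop driven by shrink_slab() below.  The callback is called with
 * nr_to_scan == 0 purely to report how many objects could be freed; with a
 * non-zero nr_to_scan it frees up to that many objects and may return -1 to
 * abort.  All "my_cache_*" names are hypothetical; only struct shrinker,
 * DEFAULT_SEEKS and register_shrinker()/unregister_shrinker() come from the
 * kernel of this era.
 */
#if 0	/* illustrative sketch only */
static atomic_t my_cache_objects = ATOMIC_INIT(0);	/* hypothetical cache size */

static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;	/* cannot touch the fs here, ask shrink_slab() to back off */
		/* free up to nr_to_scan of our objects, oldest first ... */
	}
	/* both for the nr_to_scan == 0 probe and after a scan: objects left */
	return atomic_read(&my_cache_objects);
}

static struct shrinker my_cache_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&my_cache_shrinker) at init,
 * unregister_shrinker(&my_cache_shrinker) on teardown. */
#endif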
  130. #define SHRINK_BATCH 128
  131. /*
  132. * Call the shrink functions to age shrinkable caches
  133. *
  134. * Here we assume it costs one seek to replace a lru page and that it also
  135. * takes a seek to recreate a cache object. With this in mind we age equal
  136. * percentages of the lru and ageable caches. This should balance the seeks
  137. * generated by these structures.
  138. *
  139. * If the vm encountered mapped pages on the LRU it increases the pressure on
  140. * slab to avoid swapping.
  141. *
  142. * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
  143. *
  144. * `lru_pages' represents the number of on-LRU pages in all the zones which
  145. * are eligible for the caller's allocation attempt. It is used for balancing
  146. * slab reclaim versus page reclaim.
  147. *
  148. * Returns the number of slab objects which we shrunk.
  149. */
  150. unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
  151. unsigned long lru_pages)
  152. {
  153. struct shrinker *shrinker;
  154. unsigned long ret = 0;
  155. if (scanned == 0)
  156. scanned = SWAP_CLUSTER_MAX;
  157. if (!down_read_trylock(&shrinker_rwsem))
  158. return 1; /* Assume we'll be able to shrink next time */
  159. list_for_each_entry(shrinker, &shrinker_list, list) {
  160. unsigned long long delta;
  161. unsigned long total_scan;
  162. unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
  163. delta = (4 * scanned) / shrinker->seeks;
  164. delta *= max_pass;
  165. do_div(delta, lru_pages + 1);
  166. shrinker->nr += delta;
  167. if (shrinker->nr < 0) {
  168. printk(KERN_ERR "%s: nr=%ld\n",
  169. __func__, shrinker->nr);
  170. shrinker->nr = max_pass;
  171. }
  172. /*
  173. * Avoid risking looping forever due to too large nr value:
  174. * never try to free more than twice the estimated number of
  175. * freeable entries.
  176. */
  177. if (shrinker->nr > max_pass * 2)
  178. shrinker->nr = max_pass * 2;
  179. total_scan = shrinker->nr;
  180. shrinker->nr = 0;
  181. while (total_scan >= SHRINK_BATCH) {
  182. long this_scan = SHRINK_BATCH;
  183. int shrink_ret;
  184. int nr_before;
  185. nr_before = (*shrinker->shrink)(0, gfp_mask);
  186. shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
  187. if (shrink_ret == -1)
  188. break;
  189. if (shrink_ret < nr_before)
  190. ret += nr_before - shrink_ret;
  191. count_vm_events(SLABS_SCANNED, this_scan);
  192. total_scan -= this_scan;
  193. cond_resched();
  194. }
  195. shrinker->nr += total_scan;
  196. }
  197. up_read(&shrinker_rwsem);
  198. return ret;
  199. }
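/*
 * An illustrative pass through the delta arithmetic above (made-up numbers):
 * scanned = 1024 LRU pages out of lru_pages = 262144, and a shrinker with
 * seeks = DEFAULT_SEEKS (2) reporting max_pass = 50000 freeable objects.
 *
 *	delta  = (4 * 1024) / 2	= 2048
 *	delta *= 50000		= 102400000
 *	delta /= 262144 + 1	= 390	(do_div truncates)
 *
 * shrinker->nr grows by ~390, so this pass issues three SHRINK_BATCH (128)
 * scans (384 objects) and carries the remaining 6 over to the next call.
 * In other words the cache is asked to age roughly the same fraction of its
 * objects as the fraction of the LRU that was just scanned, scaled by 4/seeks.
 */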
  200. /* Called without lock on whether page is mapped, so answer is unstable */
  201. static inline int page_mapping_inuse(struct page *page)
  202. {
  203. struct address_space *mapping;
  204. /* Page is in somebody's page tables. */
  205. if (page_mapped(page))
  206. return 1;
  207. /* Be more reluctant to reclaim swapcache than pagecache */
  208. if (PageSwapCache(page))
  209. return 1;
  210. mapping = page_mapping(page);
  211. if (!mapping)
  212. return 0;
  213. /* File is mmap'd by somebody? */
  214. return mapping_mapped(mapping);
  215. }
  216. static inline int is_page_cache_freeable(struct page *page)
  217. {
  218. return page_count(page) - !!PagePrivate(page) == 2;
  219. }
  220. static int may_write_to_queue(struct backing_dev_info *bdi)
  221. {
  222. if (current->flags & PF_SWAPWRITE)
  223. return 1;
  224. if (!bdi_write_congested(bdi))
  225. return 1;
  226. if (bdi == current->backing_dev_info)
  227. return 1;
  228. return 0;
  229. }
  230. /*
  231. * We detected a synchronous write error writing a page out. Probably
  232. * -ENOSPC. We need to propagate that into the address_space for a subsequent
  233. * fsync(), msync() or close().
  234. *
  235. * The tricky part is that after writepage we cannot touch the mapping: nothing
  236. * prevents it from being freed up. But we have a ref on the page and once
  237. * that page is locked, the mapping is pinned.
  238. *
  239. * We're allowed to run sleeping lock_page() here because we know the caller has
  240. * __GFP_FS.
  241. */
  242. static void handle_write_error(struct address_space *mapping,
  243. struct page *page, int error)
  244. {
  245. lock_page(page);
  246. if (page_mapping(page) == mapping)
  247. mapping_set_error(mapping, error);
  248. unlock_page(page);
  249. }
  250. /* Request for sync pageout. */
  251. enum pageout_io {
  252. PAGEOUT_IO_ASYNC,
  253. PAGEOUT_IO_SYNC,
  254. };
  255. /* possible outcome of pageout() */
  256. typedef enum {
  257. /* failed to write page out, page is locked */
  258. PAGE_KEEP,
  259. /* move page to the active list, page is locked */
  260. PAGE_ACTIVATE,
  261. /* page has been sent to the disk successfully, page is unlocked */
  262. PAGE_SUCCESS,
  263. /* page is clean and locked */
  264. PAGE_CLEAN,
  265. } pageout_t;
  266. /*
  267. * pageout is called by shrink_page_list() for each dirty page.
  268. * Calls ->writepage().
  269. */
  270. static pageout_t pageout(struct page *page, struct address_space *mapping,
  271. enum pageout_io sync_writeback)
  272. {
  273. /*
  274. * If the page is dirty, only perform writeback if that write
  275. * will be non-blocking, to prevent this allocation from being
  276. * stalled by pagecache activity. But note that there may be
  277. * stalls if we need to run get_block(). We could test
  278. * PagePrivate for that.
  279. *
  280. * If this process is currently in generic_file_write() against
  281. * this page's queue, we can perform writeback even if that
  282. * will block.
  283. *
  284. * If the page is swapcache, write it back even if that would
  285. * block, for some throttling. This happens by accident, because
  286. * swap_backing_dev_info is bust: it doesn't reflect the
  287. * congestion state of the swapdevs. Easy to fix, if needed.
  288. * See swapfile.c:page_queue_congested().
  289. */
  290. if (!is_page_cache_freeable(page))
  291. return PAGE_KEEP;
  292. if (!mapping) {
  293. /*
  294. * Some data journaling orphaned pages can have
  295. * page->mapping == NULL while being dirty with clean buffers.
  296. */
  297. if (PagePrivate(page)) {
  298. if (try_to_free_buffers(page)) {
  299. ClearPageDirty(page);
  300. printk("%s: orphaned page\n", __func__);
  301. return PAGE_CLEAN;
  302. }
  303. }
  304. return PAGE_KEEP;
  305. }
  306. if (mapping->a_ops->writepage == NULL)
  307. return PAGE_ACTIVATE;
  308. if (!may_write_to_queue(mapping->backing_dev_info))
  309. return PAGE_KEEP;
  310. if (clear_page_dirty_for_io(page)) {
  311. int res;
  312. struct writeback_control wbc = {
  313. .sync_mode = WB_SYNC_NONE,
  314. .nr_to_write = SWAP_CLUSTER_MAX,
  315. .range_start = 0,
  316. .range_end = LLONG_MAX,
  317. .nonblocking = 1,
  318. .for_reclaim = 1,
  319. };
  320. SetPageReclaim(page);
  321. res = mapping->a_ops->writepage(page, &wbc);
  322. if (res < 0)
  323. handle_write_error(mapping, page, res);
  324. if (res == AOP_WRITEPAGE_ACTIVATE) {
  325. ClearPageReclaim(page);
  326. return PAGE_ACTIVATE;
  327. }
  328. /*
  329. * Wait on writeback if requested to. This happens when
  330. * direct reclaiming a large contiguous area and the
  331. * first attempt to free a range of pages fails.
  332. */
  333. if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
  334. wait_on_page_writeback(page);
  335. if (!PageWriteback(page)) {
  336. /* synchronous write or broken a_ops? */
  337. ClearPageReclaim(page);
  338. }
  339. inc_zone_page_state(page, NR_VMSCAN_WRITE);
  340. return PAGE_SUCCESS;
  341. }
  342. return PAGE_CLEAN;
  343. }
  344. /*
  345. * Same as remove_mapping, but if the page is removed from the mapping, it
  346. * gets returned with a refcount of 0.
  347. */
  348. static int __remove_mapping(struct address_space *mapping, struct page *page)
  349. {
  350. BUG_ON(!PageLocked(page));
  351. BUG_ON(mapping != page_mapping(page));
  352. spin_lock_irq(&mapping->tree_lock);
  353. /*
  354. * The non-racy check for a busy page.
  355. *
  356. * Must be careful with the order of the tests. When someone has
  357. * a ref to the page, it may be possible that they dirty it then
  358. * drop the reference. So if PageDirty is tested before page_count
  359. * here, then the following race may occur:
  360. *
  361. * get_user_pages(&page);
  362. * [user mapping goes away]
  363. * write_to(page);
  364. * !PageDirty(page) [good]
  365. * SetPageDirty(page);
  366. * put_page(page);
  367. * !page_count(page) [good, discard it]
  368. *
  369. * [oops, our write_to data is lost]
  370. *
  371. * Reversing the order of the tests ensures such a situation cannot
  372. * escape unnoticed. The smp_rmb is needed to ensure the page->flags
  373. * load is not satisfied before that of page->_count.
  374. *
  375. * Note that if SetPageDirty is always performed via set_page_dirty,
  376. * and thus under tree_lock, then this ordering is not required.
  377. */
  378. if (!page_freeze_refs(page, 2))
  379. goto cannot_free;
  380. /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
  381. if (unlikely(PageDirty(page))) {
  382. page_unfreeze_refs(page, 2);
  383. goto cannot_free;
  384. }
  385. if (PageSwapCache(page)) {
  386. swp_entry_t swap = { .val = page_private(page) };
  387. __delete_from_swap_cache(page);
  388. spin_unlock_irq(&mapping->tree_lock);
  389. swap_free(swap);
  390. } else {
  391. __remove_from_page_cache(page);
  392. spin_unlock_irq(&mapping->tree_lock);
  393. }
  394. return 1;
  395. cannot_free:
  396. spin_unlock_irq(&mapping->tree_lock);
  397. return 0;
  398. }
  399. /*
  400. * Attempt to detach a locked page from its ->mapping. If it is dirty or if
  401. * someone else has a ref on the page, abort and return 0. If it was
  402. * successfully detached, return 1. Assumes the caller has a single ref on
  403. * this page.
  404. */
  405. int remove_mapping(struct address_space *mapping, struct page *page)
  406. {
  407. if (__remove_mapping(mapping, page)) {
  408. /*
  409. * Unfreezing the refcount with 1 rather than 2 effectively
  410. * drops the pagecache ref for us without requiring another
  411. * atomic operation.
  412. */
  413. page_unfreeze_refs(page, 1);
  414. return 1;
  415. }
  416. return 0;
  417. }
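/*
 * The freeze/unfreeze calls used above come from <linux/pagemap.h>.  As a
 * rough sketch (for reading purposes only; the real header may differ in
 * detail), they boil down to:
 */
#if 0	/* approximation, not the authoritative definitions */
static inline int page_freeze_refs_sketch(struct page *page, int count)
{
	/* succeeds only if nobody else holds a reference: _count goes count -> 0 */
	return atomic_cmpxchg(&page->_count, count, 0) == count;
}

static inline void page_unfreeze_refs_sketch(struct page *page, int count)
{
	/* hand the references back after deciding not to (or how to) free */
	atomic_set(&page->_count, count);
}
#endif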
  418. /**
  419. * putback_lru_page - put previously isolated page onto appropriate LRU list
  420. * @page: page to be put back to appropriate lru list
  421. *
  422. * Add previously isolated @page to appropriate LRU list.
  423. * Page may still be unevictable for other reasons.
  424. *
  425. * lru_lock must not be held, interrupts must be enabled.
  426. */
  427. #ifdef CONFIG_UNEVICTABLE_LRU
  428. void putback_lru_page(struct page *page)
  429. {
  430. int lru;
  431. int active = !!TestClearPageActive(page);
  432. int was_unevictable = PageUnevictable(page);
  433. VM_BUG_ON(PageLRU(page));
  434. redo:
  435. ClearPageUnevictable(page);
  436. if (page_evictable(page, NULL)) {
  437. /*
  438. * For evictable pages, we can use the cache.
  439. * In event of a race, worst case is we end up with an
  440. * unevictable page on [in]active list.
  441. * We know how to handle that.
  442. */
  443. lru = active + page_is_file_cache(page);
  444. lru_cache_add_lru(page, lru);
  445. } else {
  446. /*
  447. * Put unevictable pages directly on zone's unevictable
  448. * list.
  449. */
  450. lru = LRU_UNEVICTABLE;
  451. add_page_to_unevictable_list(page);
  452. }
  453. mem_cgroup_move_lists(page, lru);
  454. /*
  455. * page's status can change while we move it among lru. If an evictable
  456. * page is on the unevictable list, it will never be freed. To avoid that,
  457. * check again after we added it to the list.
  458. */
  459. if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
  460. if (!isolate_lru_page(page)) {
  461. put_page(page);
  462. goto redo;
  463. }
  464. /* This means someone else dropped this page from LRU
  465. * So, it will be freed or putback to LRU again. There is
  466. * nothing to do here.
  467. */
  468. }
  469. if (was_unevictable && lru != LRU_UNEVICTABLE)
  470. count_vm_event(UNEVICTABLE_PGRESCUED);
  471. else if (!was_unevictable && lru == LRU_UNEVICTABLE)
  472. count_vm_event(UNEVICTABLE_PGCULLED);
  473. put_page(page); /* drop ref from isolate */
  474. }
  475. #else /* CONFIG_UNEVICTABLE_LRU */
  476. void putback_lru_page(struct page *page)
  477. {
  478. int lru;
  479. VM_BUG_ON(PageLRU(page));
  480. lru = !!TestClearPageActive(page) + page_is_file_cache(page);
  481. lru_cache_add_lru(page, lru);
  482. mem_cgroup_move_lists(page, lru);
  483. put_page(page);
  484. }
  485. #endif /* CONFIG_UNEVICTABLE_LRU */
  486. /*
  487. * shrink_page_list() returns the number of reclaimed pages
  488. */
  489. static unsigned long shrink_page_list(struct list_head *page_list,
  490. struct scan_control *sc,
  491. enum pageout_io sync_writeback)
  492. {
  493. LIST_HEAD(ret_pages);
  494. struct pagevec freed_pvec;
  495. int pgactivate = 0;
  496. unsigned long nr_reclaimed = 0;
  497. cond_resched();
  498. pagevec_init(&freed_pvec, 1);
  499. while (!list_empty(page_list)) {
  500. struct address_space *mapping;
  501. struct page *page;
  502. int may_enter_fs;
  503. int referenced;
  504. cond_resched();
  505. page = lru_to_page(page_list);
  506. list_del(&page->lru);
  507. if (!trylock_page(page))
  508. goto keep;
  509. VM_BUG_ON(PageActive(page));
  510. sc->nr_scanned++;
  511. if (unlikely(!page_evictable(page, NULL)))
  512. goto cull_mlocked;
  513. if (!sc->may_swap && page_mapped(page))
  514. goto keep_locked;
  515. /* Double the slab pressure for mapped and swapcache pages */
  516. if (page_mapped(page) || PageSwapCache(page))
  517. sc->nr_scanned++;
  518. may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
  519. (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
  520. if (PageWriteback(page)) {
  521. /*
  522. * Synchronous reclaim is performed in two passes,
  523. * first an asynchronous pass over the list to
  524. * start parallel writeback, and a second synchronous
  525. * pass to wait for the IO to complete. Wait here
  526. * for any page for which writeback has already
  527. * started.
  528. */
  529. if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
  530. wait_on_page_writeback(page);
  531. else
  532. goto keep_locked;
  533. }
  534. referenced = page_referenced(page, 1, sc->mem_cgroup);
  535. /* In active use or really unfreeable? Activate it. */
  536. if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
  537. referenced && page_mapping_inuse(page))
  538. goto activate_locked;
  539. /*
  540. * Anonymous process memory has backing store?
  541. * Try to allocate it some swap space here.
  542. */
  543. if (PageAnon(page) && !PageSwapCache(page)) {
  544. if (!(sc->gfp_mask & __GFP_IO))
  545. goto keep_locked;
  546. if (!add_to_swap(page))
  547. goto activate_locked;
  548. may_enter_fs = 1;
  549. }
  550. mapping = page_mapping(page);
  551. /*
  552. * The page is mapped into the page tables of one or more
  553. * processes. Try to unmap it here.
  554. */
  555. if (page_mapped(page) && mapping) {
  556. switch (try_to_unmap(page, 0)) {
  557. case SWAP_FAIL:
  558. goto activate_locked;
  559. case SWAP_AGAIN:
  560. goto keep_locked;
  561. case SWAP_MLOCK:
  562. goto cull_mlocked;
  563. case SWAP_SUCCESS:
  564. ; /* try to free the page below */
  565. }
  566. }
  567. if (PageDirty(page)) {
  568. if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
  569. goto keep_locked;
  570. if (!may_enter_fs)
  571. goto keep_locked;
  572. if (!sc->may_writepage)
  573. goto keep_locked;
  574. /* Page is dirty, try to write it out here */
  575. switch (pageout(page, mapping, sync_writeback)) {
  576. case PAGE_KEEP:
  577. goto keep_locked;
  578. case PAGE_ACTIVATE:
  579. goto activate_locked;
  580. case PAGE_SUCCESS:
  581. if (PageWriteback(page) || PageDirty(page))
  582. goto keep;
  583. /*
  584. * A synchronous write - probably a ramdisk. Go
  585. * ahead and try to reclaim the page.
  586. */
  587. if (!trylock_page(page))
  588. goto keep;
  589. if (PageDirty(page) || PageWriteback(page))
  590. goto keep_locked;
  591. mapping = page_mapping(page);
  592. case PAGE_CLEAN:
  593. ; /* try to free the page below */
  594. }
  595. }
  596. /*
  597. * If the page has buffers, try to free the buffer mappings
  598. * associated with this page. If we succeed we try to free
  599. * the page as well.
  600. *
  601. * We do this even if the page is PageDirty().
  602. * try_to_release_page() does not perform I/O, but it is
  603. * possible for a page to have PageDirty set, but it is actually
  604. * clean (all its buffers are clean). This happens if the
  605. * buffers were written out directly, with submit_bh(). ext3
  606. * will do this, as well as the blockdev mapping.
  607. * try_to_release_page() will discover that cleanness and will
  608. * drop the buffers and mark the page clean - it can be freed.
  609. *
  610. * Rarely, pages can have buffers and no ->mapping. These are
  611. * the pages which were not successfully invalidated in
  612. * truncate_complete_page(). We try to drop those buffers here
  613. * and if that worked, and the page is no longer mapped into
  614. * process address space (page_count == 1) it can be freed.
  615. * Otherwise, leave the page on the LRU so it is swappable.
  616. */
  617. if (PagePrivate(page)) {
  618. if (!try_to_release_page(page, sc->gfp_mask))
  619. goto activate_locked;
  620. if (!mapping && page_count(page) == 1) {
  621. unlock_page(page);
  622. if (put_page_testzero(page))
  623. goto free_it;
  624. else {
  625. /*
  626. * rare race with speculative reference.
  627. * the speculative reference will free
  628. * this page shortly, so we may
  629. * increment nr_reclaimed here (and
  630. * leave it off the LRU).
  631. */
  632. nr_reclaimed++;
  633. continue;
  634. }
  635. }
  636. }
  637. if (!mapping || !__remove_mapping(mapping, page))
  638. goto keep_locked;
  639. /*
  640. * At this point, we have no other references and there is
  641. * no way to pick any more up (removed from LRU, removed
  642. * from pagecache). Can use non-atomic bitops now (and
  643. * we obviously don't have to worry about waking up a process
  644. * waiting on the page lock, because there are no references.
  645. */
  646. __clear_page_locked(page);
  647. free_it:
  648. nr_reclaimed++;
  649. if (!pagevec_add(&freed_pvec, page)) {
  650. __pagevec_free(&freed_pvec);
  651. pagevec_reinit(&freed_pvec);
  652. }
  653. continue;
  654. cull_mlocked:
  655. if (PageSwapCache(page))
  656. try_to_free_swap(page);
  657. unlock_page(page);
  658. putback_lru_page(page);
  659. continue;
  660. activate_locked:
  661. /* Not a candidate for swapping, so reclaim swap space. */
  662. if (PageSwapCache(page) && vm_swap_full())
  663. try_to_free_swap(page);
  664. VM_BUG_ON(PageActive(page));
  665. SetPageActive(page);
  666. pgactivate++;
  667. keep_locked:
  668. unlock_page(page);
  669. keep:
  670. list_add(&page->lru, &ret_pages);
  671. VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
  672. }
  673. list_splice(&ret_pages, page_list);
  674. if (pagevec_count(&freed_pvec))
  675. __pagevec_free(&freed_pvec);
  676. count_vm_events(PGACTIVATE, pgactivate);
  677. return nr_reclaimed;
  678. }
  679. /* LRU Isolation modes. */
  680. #define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */
  681. #define ISOLATE_ACTIVE 1 /* Isolate active pages. */
  682. #define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */
  683. /*
  684. * Attempt to remove the specified page from its LRU. Only take this page
  685. * if it is of the appropriate PageActive status. Pages which are being
  686. * freed elsewhere are also ignored.
  687. *
  688. * page: page to consider
  689. * mode: one of the LRU isolation modes defined above
  690. *
  691. * returns 0 on success, -ve errno on failure.
  692. */
  693. int __isolate_lru_page(struct page *page, int mode, int file)
  694. {
  695. int ret = -EINVAL;
  696. /* Only take pages on the LRU. */
  697. if (!PageLRU(page))
  698. return ret;
  699. /*
  700. * When checking the active state, we need to be sure we are
  701. * dealing with comparable boolean values. Take the logical not
  702. * of each.
  703. */
  704. if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
  705. return ret;
  706. if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
  707. return ret;
  708. /*
  709. * When this function is being called for lumpy reclaim, we
  710. * initially look into all LRU pages, active, inactive and
  711. * unevictable; only give shrink_page_list evictable pages.
  712. */
  713. if (PageUnevictable(page))
  714. return ret;
  715. ret = -EBUSY;
  716. if (likely(get_page_unless_zero(page))) {
  717. /*
  718. * Be careful not to clear PageLRU until after we're
  719. * sure the page is not being freed elsewhere -- the
  720. * page release code relies on it.
  721. */
  722. ClearPageLRU(page);
  723. ret = 0;
  724. }
  725. return ret;
  726. }
  727. /*
  728. * zone->lru_lock is heavily contended. Some of the functions that
  729. * shrink the lists perform better by taking out a batch of pages
  730. * and working on them outside the LRU lock.
  731. *
  732. * For pagecache intensive workloads, this function is the hottest
  733. * spot in the kernel (apart from copy_*_user functions).
  734. *
  735. * Appropriate locks must be held before calling this function.
  736. *
  737. * @nr_to_scan: The number of pages to look through on the list.
  738. * @src: The LRU list to pull pages off.
  739. * @dst: The temp list to put pages on to.
  740. * @scanned: The number of pages that were scanned.
  741. * @order: The caller's attempted allocation order
  742. * @mode: One of the LRU isolation modes
  743. * @file: True [1] if isolating file [!anon] pages
  744. *
  745. * returns how many pages were moved onto *@dst.
  746. */
  747. static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
  748. struct list_head *src, struct list_head *dst,
  749. unsigned long *scanned, int order, int mode, int file)
  750. {
  751. unsigned long nr_taken = 0;
  752. unsigned long scan;
  753. for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
  754. struct page *page;
  755. unsigned long pfn;
  756. unsigned long end_pfn;
  757. unsigned long page_pfn;
  758. int zone_id;
  759. page = lru_to_page(src);
  760. prefetchw_prev_lru_page(page, src, flags);
  761. VM_BUG_ON(!PageLRU(page));
  762. switch (__isolate_lru_page(page, mode, file)) {
  763. case 0:
  764. list_move(&page->lru, dst);
  765. nr_taken++;
  766. break;
  767. case -EBUSY:
  768. /* else it is being freed elsewhere */
  769. list_move(&page->lru, src);
  770. continue;
  771. default:
  772. BUG();
  773. }
  774. if (!order)
  775. continue;
  776. /*
  777. * Attempt to take all pages in the order aligned region
  778. * surrounding the tag page. Only take those pages of
  779. * the same active state as that tag page. We may safely
  780. * round the target page pfn down to the requested order
  781. * as the mem_map is guaranteed valid out to MAX_ORDER;
  782. * if that page is in a different zone we will detect
  783. * it from its zone id and abort this block scan.
  784. */
  785. zone_id = page_zone_id(page);
  786. page_pfn = page_to_pfn(page);
  787. pfn = page_pfn & ~((1 << order) - 1);
  788. end_pfn = pfn + (1 << order);
  789. for (; pfn < end_pfn; pfn++) {
  790. struct page *cursor_page;
  791. /* The target page is in the block, ignore it. */
  792. if (unlikely(pfn == page_pfn))
  793. continue;
  794. /* Avoid holes within the zone. */
  795. if (unlikely(!pfn_valid_within(pfn)))
  796. break;
  797. cursor_page = pfn_to_page(pfn);
  798. /* Check that we have not crossed a zone boundary. */
  799. if (unlikely(page_zone_id(cursor_page) != zone_id))
  800. continue;
  801. switch (__isolate_lru_page(cursor_page, mode, file)) {
  802. case 0:
  803. list_move(&cursor_page->lru, dst);
  804. nr_taken++;
  805. scan++;
  806. break;
  807. case -EBUSY:
  808. /* else it is being freed elsewhere */
  809. list_move(&cursor_page->lru, src);
  810. default:
  811. break; /* ! on LRU or wrong list */
  812. }
  813. }
  814. }
  815. *scanned = scan;
  816. return nr_taken;
  817. }
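/*
 * Worked example of the order-aligned block scan above (made-up pfn):
 * with order = 4 and a tag page at pfn 0x12345,
 *
 *	pfn     = 0x12345 & ~((1 << 4) - 1) = 0x12340
 *	end_pfn = 0x12340 + (1 << 4)        = 0x12350
 *
 * so the loop walks pfns 0x12340..0x1234f, skips the tag page itself, and
 * tries to isolate the other 15 pages of that naturally aligned 16-page
 * block for lumpy reclaim.
 */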
  818. static unsigned long isolate_pages_global(unsigned long nr,
  819. struct list_head *dst,
  820. unsigned long *scanned, int order,
  821. int mode, struct zone *z,
  822. struct mem_cgroup *mem_cont,
  823. int active, int file)
  824. {
  825. int lru = LRU_BASE;
  826. if (active)
  827. lru += LRU_ACTIVE;
  828. if (file)
  829. lru += LRU_FILE;
  830. return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
  831. mode, !!file);
  832. }
  833. /*
  834. * clear_active_flags() is a helper for shrink_active_list(), clearing
  835. * any active bits from the pages in the list.
  836. */
  837. static unsigned long clear_active_flags(struct list_head *page_list,
  838. unsigned int *count)
  839. {
  840. int nr_active = 0;
  841. int lru;
  842. struct page *page;
  843. list_for_each_entry(page, page_list, lru) {
  844. lru = page_is_file_cache(page);
  845. if (PageActive(page)) {
  846. lru += LRU_ACTIVE;
  847. ClearPageActive(page);
  848. nr_active++;
  849. }
  850. count[lru]++;
  851. }
  852. return nr_active;
  853. }
  854. /**
  855. * isolate_lru_page - tries to isolate a page from its LRU list
  856. * @page: page to isolate from its LRU list
  857. *
  858. * Isolates a @page from an LRU list, clears PageLRU and adjusts the
  859. * vmstat statistic corresponding to whatever LRU list the page was on.
  860. *
  861. * Returns 0 if the page was removed from an LRU list.
  862. * Returns -EBUSY if the page was not on an LRU list.
  863. *
  864. * The returned page will have PageLRU() cleared. If it was found on
  865. * the active list, it will have PageActive set. If it was found on
  866. * the unevictable list, it will have the PageUnevictable bit set. That flag
  867. * may need to be cleared by the caller before letting the page go.
  868. *
  869. * The vmstat statistic corresponding to the list on which the page was
  870. * found will be decremented.
  871. *
  872. * Restrictions:
  873. * (1) Must be called with an elevated refcount on the page. This is a
  874. * fundamental difference from isolate_lru_pages (which is called
  875. * without a stable reference).
  876. * (2) the lru_lock must not be held.
  877. * (3) interrupts must be enabled.
  878. */
  879. int isolate_lru_page(struct page *page)
  880. {
  881. int ret = -EBUSY;
  882. if (PageLRU(page)) {
  883. struct zone *zone = page_zone(page);
  884. spin_lock_irq(&zone->lru_lock);
  885. if (PageLRU(page) && get_page_unless_zero(page)) {
  886. int lru = page_lru(page);
  887. ret = 0;
  888. ClearPageLRU(page);
  889. del_page_from_lru_list(zone, page, lru);
  890. }
  891. spin_unlock_irq(&zone->lru_lock);
  892. }
  893. return ret;
  894. }
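/*
 * Illustrative caller pattern for isolate_lru_page() above (a hypothetical
 * helper, not part of vmscan.c).  Per the restrictions: the caller already
 * holds a reference on @page, lru_lock is not held, interrupts are enabled.
 */
#if 0	/* illustrative sketch only */
static void example_take_page_off_lru(struct page *page)
{
	if (isolate_lru_page(page) == 0) {
		/*
		 * The page is now off its LRU list with PageLRU cleared;
		 * PageActive/PageUnevictable still record which list it came
		 * from, so putback_lru_page() can restore it correctly.
		 */
		putback_lru_page(page);	/* re-add and drop the isolation ref */
	}
}
#endif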
  895. /*
  896. * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  897. * of reclaimed pages
  898. */
  899. static unsigned long shrink_inactive_list(unsigned long max_scan,
  900. struct zone *zone, struct scan_control *sc,
  901. int priority, int file)
  902. {
  903. LIST_HEAD(page_list);
  904. struct pagevec pvec;
  905. unsigned long nr_scanned = 0;
  906. unsigned long nr_reclaimed = 0;
  907. pagevec_init(&pvec, 1);
  908. lru_add_drain();
  909. spin_lock_irq(&zone->lru_lock);
  910. do {
  911. struct page *page;
  912. unsigned long nr_taken;
  913. unsigned long nr_scan;
  914. unsigned long nr_freed;
  915. unsigned long nr_active;
  916. unsigned int count[NR_LRU_LISTS] = { 0, };
  917. int mode = ISOLATE_INACTIVE;
  918. /*
  919. * If we need a large contiguous chunk of memory, or have
  920. * trouble getting a small set of contiguous pages, we
  921. * will reclaim both active and inactive pages.
  922. *
  923. * We use the same threshold as pageout congestion_wait below.
  924. */
  925. if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
  926. mode = ISOLATE_BOTH;
  927. else if (sc->order && priority < DEF_PRIORITY - 2)
  928. mode = ISOLATE_BOTH;
  929. nr_taken = sc->isolate_pages(sc->swap_cluster_max,
  930. &page_list, &nr_scan, sc->order, mode,
  931. zone, sc->mem_cgroup, 0, file);
  932. nr_active = clear_active_flags(&page_list, count);
  933. __count_vm_events(PGDEACTIVATE, nr_active);
  934. __mod_zone_page_state(zone, NR_ACTIVE_FILE,
  935. -count[LRU_ACTIVE_FILE]);
  936. __mod_zone_page_state(zone, NR_INACTIVE_FILE,
  937. -count[LRU_INACTIVE_FILE]);
  938. __mod_zone_page_state(zone, NR_ACTIVE_ANON,
  939. -count[LRU_ACTIVE_ANON]);
  940. __mod_zone_page_state(zone, NR_INACTIVE_ANON,
  941. -count[LRU_INACTIVE_ANON]);
  942. if (scan_global_lru(sc)) {
  943. zone->pages_scanned += nr_scan;
  944. zone->recent_scanned[0] += count[LRU_INACTIVE_ANON];
  945. zone->recent_scanned[0] += count[LRU_ACTIVE_ANON];
  946. zone->recent_scanned[1] += count[LRU_INACTIVE_FILE];
  947. zone->recent_scanned[1] += count[LRU_ACTIVE_FILE];
  948. }
  949. spin_unlock_irq(&zone->lru_lock);
  950. nr_scanned += nr_scan;
  951. nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
  952. /*
  953. * If we are direct reclaiming for contiguous pages and we do
  954. * not reclaim everything in the list, try again and wait
  955. * for IO to complete. This will stall high-order allocations
  956. * but that should be acceptable to the caller
  957. */
  958. if (nr_freed < nr_taken && !current_is_kswapd() &&
  959. sc->order > PAGE_ALLOC_COSTLY_ORDER) {
  960. congestion_wait(WRITE, HZ/10);
  961. /*
  962. * The attempt at page out may have made some
  963. * of the pages active, mark them inactive again.
  964. */
  965. nr_active = clear_active_flags(&page_list, count);
  966. count_vm_events(PGDEACTIVATE, nr_active);
  967. nr_freed += shrink_page_list(&page_list, sc,
  968. PAGEOUT_IO_SYNC);
  969. }
  970. nr_reclaimed += nr_freed;
  971. local_irq_disable();
  972. if (current_is_kswapd()) {
  973. __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
  974. __count_vm_events(KSWAPD_STEAL, nr_freed);
  975. } else if (scan_global_lru(sc))
  976. __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
  977. __count_zone_vm_events(PGSTEAL, zone, nr_freed);
  978. if (nr_taken == 0)
  979. goto done;
  980. spin_lock(&zone->lru_lock);
  981. /*
  982. * Put back any unfreeable pages.
  983. */
  984. while (!list_empty(&page_list)) {
  985. int lru;
  986. page = lru_to_page(&page_list);
  987. VM_BUG_ON(PageLRU(page));
  988. list_del(&page->lru);
  989. if (unlikely(!page_evictable(page, NULL))) {
  990. spin_unlock_irq(&zone->lru_lock);
  991. putback_lru_page(page);
  992. spin_lock_irq(&zone->lru_lock);
  993. continue;
  994. }
  995. SetPageLRU(page);
  996. lru = page_lru(page);
  997. add_page_to_lru_list(zone, page, lru);
  998. mem_cgroup_move_lists(page, lru);
  999. if (PageActive(page) && scan_global_lru(sc)) {
  1000. int file = !!page_is_file_cache(page);
  1001. zone->recent_rotated[file]++;
  1002. }
  1003. if (!pagevec_add(&pvec, page)) {
  1004. spin_unlock_irq(&zone->lru_lock);
  1005. __pagevec_release(&pvec);
  1006. spin_lock_irq(&zone->lru_lock);
  1007. }
  1008. }
  1009. } while (nr_scanned < max_scan);
  1010. spin_unlock(&zone->lru_lock);
  1011. done:
  1012. local_irq_enable();
  1013. pagevec_release(&pvec);
  1014. return nr_reclaimed;
  1015. }
  1016. /*
  1017. * We are about to scan this zone at a certain priority level. If that priority
  1018. * level is smaller (ie: more urgent) than the previous priority, then note
  1019. * that priority level within the zone. This is done so that when the next
  1020. * process comes in to scan this zone, it will immediately start out at this
  1021. * priority level rather than having to build up its own scanning priority.
  1022. * Here, this priority affects only the reclaim-mapped threshold.
  1023. */
  1024. static inline void note_zone_scanning_priority(struct zone *zone, int priority)
  1025. {
  1026. if (priority < zone->prev_priority)
  1027. zone->prev_priority = priority;
  1028. }
  1029. static inline int zone_is_near_oom(struct zone *zone)
  1030. {
  1031. return zone->pages_scanned >= (zone_lru_pages(zone) * 3);
  1032. }
  1033. /*
  1034. * This moves pages from the active list to the inactive list.
  1035. *
  1036. * We move them the other way if the page is referenced by one or more
  1037. * processes, from rmap.
  1038. *
  1039. * If the pages are mostly unmapped, the processing is fast and it is
  1040. * appropriate to hold zone->lru_lock across the whole operation. But if
  1041. * the pages are mapped, the processing is slow (page_referenced()) so we
  1042. * should drop zone->lru_lock around each page. It's impossible to balance
  1043. * this, so instead we remove the pages from the LRU while processing them.
  1044. * It is safe to rely on PG_active against the non-LRU pages in here because
  1045. * nobody will play with that bit on a non-LRU page.
  1046. *
  1047. * The downside is that we have to touch page->_count against each page.
  1048. * But we had to alter page->flags anyway.
  1049. */
  1050. static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
  1051. struct scan_control *sc, int priority, int file)
  1052. {
  1053. unsigned long pgmoved;
  1054. int pgdeactivate = 0;
  1055. unsigned long pgscanned;
  1056. LIST_HEAD(l_hold); /* The pages which were snipped off */
  1057. LIST_HEAD(l_inactive);
  1058. struct page *page;
  1059. struct pagevec pvec;
  1060. enum lru_list lru;
  1061. lru_add_drain();
  1062. spin_lock_irq(&zone->lru_lock);
  1063. pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
  1064. ISOLATE_ACTIVE, zone,
  1065. sc->mem_cgroup, 1, file);
  1066. /*
  1067. * zone->pages_scanned is used to detect the zone's OOM situation;
  1068. * mem_cgroup remembers nr_scan by itself.
  1069. */
  1070. if (scan_global_lru(sc)) {
  1071. zone->pages_scanned += pgscanned;
  1072. zone->recent_scanned[!!file] += pgmoved;
  1073. }
  1074. if (file)
  1075. __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
  1076. else
  1077. __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
  1078. spin_unlock_irq(&zone->lru_lock);
  1079. pgmoved = 0;
  1080. while (!list_empty(&l_hold)) {
  1081. cond_resched();
  1082. page = lru_to_page(&l_hold);
  1083. list_del(&page->lru);
  1084. if (unlikely(!page_evictable(page, NULL))) {
  1085. putback_lru_page(page);
  1086. continue;
  1087. }
  1088. /* page_referenced clears PageReferenced */
  1089. if (page_mapping_inuse(page) &&
  1090. page_referenced(page, 0, sc->mem_cgroup))
  1091. pgmoved++;
  1092. list_add(&page->lru, &l_inactive);
  1093. }
  1094. spin_lock_irq(&zone->lru_lock);
  1095. /*
  1096. * Count referenced pages from currently used mappings as
  1097. * rotated, even though they are moved to the inactive list.
  1098. * This helps balance scan pressure between file and anonymous
  1099. * pages in get_scan_ratio.
  1100. */
  1101. zone->recent_rotated[!!file] += pgmoved;
  1102. /*
  1103. * Move the pages to the [file or anon] inactive list.
  1104. */
  1105. pagevec_init(&pvec, 1);
  1106. pgmoved = 0;
  1107. lru = LRU_BASE + file * LRU_FILE;
  1108. while (!list_empty(&l_inactive)) {
  1109. page = lru_to_page(&l_inactive);
  1110. prefetchw_prev_lru_page(page, &l_inactive, flags);
  1111. VM_BUG_ON(PageLRU(page));
  1112. SetPageLRU(page);
  1113. VM_BUG_ON(!PageActive(page));
  1114. ClearPageActive(page);
  1115. list_move(&page->lru, &zone->lru[lru].list);
  1116. mem_cgroup_move_lists(page, lru);
  1117. pgmoved++;
  1118. if (!pagevec_add(&pvec, page)) {
  1119. __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
  1120. spin_unlock_irq(&zone->lru_lock);
  1121. pgdeactivate += pgmoved;
  1122. pgmoved = 0;
  1123. if (buffer_heads_over_limit)
  1124. pagevec_strip(&pvec);
  1125. __pagevec_release(&pvec);
  1126. spin_lock_irq(&zone->lru_lock);
  1127. }
  1128. }
  1129. __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
  1130. pgdeactivate += pgmoved;
  1131. if (buffer_heads_over_limit) {
  1132. spin_unlock_irq(&zone->lru_lock);
  1133. pagevec_strip(&pvec);
  1134. spin_lock_irq(&zone->lru_lock);
  1135. }
  1136. __count_zone_vm_events(PGREFILL, zone, pgscanned);
  1137. __count_vm_events(PGDEACTIVATE, pgdeactivate);
  1138. spin_unlock_irq(&zone->lru_lock);
  1139. if (vm_swap_full())
  1140. pagevec_swap_free(&pvec);
  1141. pagevec_release(&pvec);
  1142. }
  1143. static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
  1144. struct zone *zone, struct scan_control *sc, int priority)
  1145. {
  1146. int file = is_file_lru(lru);
  1147. if (lru == LRU_ACTIVE_FILE) {
  1148. shrink_active_list(nr_to_scan, zone, sc, priority, file);
  1149. return 0;
  1150. }
  1151. if (lru == LRU_ACTIVE_ANON &&
  1152. (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
  1153. shrink_active_list(nr_to_scan, zone, sc, priority, file);
  1154. return 0;
  1155. }
  1156. return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
  1157. }
  1158. /*
  1159. * Determine how aggressively the anon and file LRU lists should be
  1160. * scanned. The relative value of each set of LRU lists is determined
  1161. * by looking at the fraction of the pages scanned we did rotate back
  1162. * onto the active list instead of evicting.
  1163. *
  1164. * percent[0] specifies how much pressure to put on ram/swap backed
  1165. * memory, while percent[1] determines pressure on the file LRUs.
  1166. */
  1167. static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
  1168. unsigned long *percent)
  1169. {
  1170. unsigned long anon, file, free;
  1171. unsigned long anon_prio, file_prio;
  1172. unsigned long ap, fp;
  1173. /* If we have no swap space, do not bother scanning anon pages. */
  1174. if (nr_swap_pages <= 0) {
  1175. percent[0] = 0;
  1176. percent[1] = 100;
  1177. return;
  1178. }
  1179. anon = zone_page_state(zone, NR_ACTIVE_ANON) +
  1180. zone_page_state(zone, NR_INACTIVE_ANON);
  1181. file = zone_page_state(zone, NR_ACTIVE_FILE) +
  1182. zone_page_state(zone, NR_INACTIVE_FILE);
  1183. free = zone_page_state(zone, NR_FREE_PAGES);
  1184. /* If we have very few page cache pages, force-scan anon pages. */
  1185. if (unlikely(file + free <= zone->pages_high)) {
  1186. percent[0] = 100;
  1187. percent[1] = 0;
  1188. return;
  1189. }
  1190. /*
  1191. * OK, so we have swap space and a fair amount of page cache
  1192. * pages. We use the recently rotated / recently scanned
  1193. * ratios to determine how valuable each cache is.
  1194. *
  1195. * Because workloads change over time (and to avoid overflow)
  1196. * we keep these statistics as a floating average, which ends
  1197. * up weighing recent references more than old ones.
  1198. *
  1199. * anon in [0], file in [1]
  1200. */
  1201. if (unlikely(zone->recent_scanned[0] > anon / 4)) {
  1202. spin_lock_irq(&zone->lru_lock);
  1203. zone->recent_scanned[0] /= 2;
  1204. zone->recent_rotated[0] /= 2;
  1205. spin_unlock_irq(&zone->lru_lock);
  1206. }
  1207. if (unlikely(zone->recent_scanned[1] > file / 4)) {
  1208. spin_lock_irq(&zone->lru_lock);
  1209. zone->recent_scanned[1] /= 2;
  1210. zone->recent_rotated[1] /= 2;
  1211. spin_unlock_irq(&zone->lru_lock);
  1212. }
  1213. /*
  1214. * With swappiness at 100, anonymous and file have the same priority.
  1215. * This scanning priority is essentially the inverse of IO cost.
  1216. */
  1217. anon_prio = sc->swappiness;
  1218. file_prio = 200 - sc->swappiness;
  1219. /*
  1220. * The amount of pressure on anon vs file pages is inversely
  1221. * proportional to the fraction of recently scanned pages on
  1222. * each list that were recently referenced and in active use.
  1223. */
  1224. ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
  1225. ap /= zone->recent_rotated[0] + 1;
  1226. fp = (file_prio + 1) * (zone->recent_scanned[1] + 1);
  1227. fp /= zone->recent_rotated[1] + 1;
  1228. /* Normalize to percentages */
  1229. percent[0] = 100 * ap / (ap + fp + 1);
  1230. percent[1] = 100 - percent[0];
  1231. }
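/*
 * Worked example of the ratio above (made-up counters, swappiness = 60, so
 * anon_prio = 60 and file_prio = 140).  Suppose the anon LRU recently
 * rotated 900 of 1000 scanned pages back (a hot working set) while the file
 * LRU rotated only 100 of 1000:
 *
 *	ap = (60 + 1)  * (1000 + 1) / (900 + 1) = 67
 *	fp = (140 + 1) * (1000 + 1) / (100 + 1) = 1397
 *
 *	percent[0] = 100 * 67 / (67 + 1397 + 1) = 4	(anon)
 *	percent[1] = 100 - 4                    = 96	(file)
 *
 * so shrink_zone() puts almost all of its pressure on the file LRUs and
 * largely leaves the actively used anonymous pages alone.
 */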
  1232. /*
  1233. * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
  1234. */
  1235. static unsigned long shrink_zone(int priority, struct zone *zone,
  1236. struct scan_control *sc)
  1237. {
  1238. unsigned long nr[NR_LRU_LISTS];
  1239. unsigned long nr_to_scan;
  1240. unsigned long nr_reclaimed = 0;
  1241. unsigned long percent[2]; /* anon @ 0; file @ 1 */
  1242. enum lru_list l;
  1243. get_scan_ratio(zone, sc, percent);
  1244. for_each_evictable_lru(l) {
  1245. if (scan_global_lru(sc)) {
  1246. int file = is_file_lru(l);
  1247. int scan;
  1248. scan = zone_page_state(zone, NR_LRU_BASE + l);
  1249. if (priority) {
  1250. scan >>= priority;
  1251. scan = (scan * percent[file]) / 100;
  1252. }
  1253. zone->lru[l].nr_scan += scan;
  1254. nr[l] = zone->lru[l].nr_scan;
  1255. if (nr[l] >= sc->swap_cluster_max)
  1256. zone->lru[l].nr_scan = 0;
  1257. else
  1258. nr[l] = 0;
  1259. } else {
  1260. /*
  1261. * This reclaim occurs not because zone memory shortage
  1262. * but because memory controller hits its limit.
  1263. * Don't modify zone reclaim related data.
  1264. */
  1265. nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
  1266. priority, l);
  1267. }
  1268. }
  1269. while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
  1270. nr[LRU_INACTIVE_FILE]) {
  1271. for_each_evictable_lru(l) {
  1272. if (nr[l]) {
  1273. nr_to_scan = min(nr[l],
  1274. (unsigned long)sc->swap_cluster_max);
  1275. nr[l] -= nr_to_scan;
  1276. nr_reclaimed += shrink_list(l, nr_to_scan,
  1277. zone, sc, priority);
  1278. }
  1279. }
  1280. }
  1281. /*
  1282. * Even if we did not try to evict anon pages at all, we want to
  1283. * rebalance the anon lru active/inactive ratio.
  1284. */
  1285. if (!scan_global_lru(sc) || inactive_anon_is_low(zone))
  1286. shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
  1287. else if (!scan_global_lru(sc))
  1288. shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
  1289. throttle_vm_writeout(sc->gfp_mask);
  1290. return nr_reclaimed;
  1291. }
  1292. /*
  1293. * This is the direct reclaim path, for page-allocating processes. We only
  1294. * try to reclaim pages from zones which will satisfy the caller's allocation
  1295. * request.
  1296. *
  1297. * We reclaim from a zone even if that zone is over pages_high. Because:
  1298. * a) The caller may be trying to free *extra* pages to satisfy a higher-order
  1299. * allocation or
  1300. * b) The zones may be over pages_high but they must go *over* pages_high to
  1301. * satisfy the `incremental min' zone defense algorithm.
  1302. *
  1303. * Returns the number of reclaimed pages.
  1304. *
  1305. * If a zone is deemed to be full of pinned pages then just give it a light
  1306. * scan then give up on it.
  1307. */
  1308. static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
  1309. struct scan_control *sc)
  1310. {
  1311. enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
  1312. unsigned long nr_reclaimed = 0;
  1313. struct zoneref *z;
  1314. struct zone *zone;
  1315. sc->all_unreclaimable = 1;
  1316. for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
  1317. if (!populated_zone(zone))
  1318. continue;
  1319. /*
1320. * Take care that memory controller reclaiming has only a small
1321. * influence on the global LRU.
  1322. */
  1323. if (scan_global_lru(sc)) {
  1324. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  1325. continue;
  1326. note_zone_scanning_priority(zone, priority);
  1327. if (zone_is_all_unreclaimable(zone) &&
  1328. priority != DEF_PRIORITY)
  1329. continue; /* Let kswapd poll it */
  1330. sc->all_unreclaimable = 0;
  1331. } else {
  1332. /*
1333. * Ignore cpuset limitations here. We just want to reduce the
1334. * number of pages used by us, regardless of memory shortage.
  1335. */
  1336. sc->all_unreclaimable = 0;
  1337. mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
  1338. priority);
  1339. }
  1340. nr_reclaimed += shrink_zone(priority, zone, sc);
  1341. }
  1342. return nr_reclaimed;
  1343. }
  1344. /*
  1345. * This is the main entry point to direct page reclaim.
  1346. *
  1347. * If a full scan of the inactive list fails to free enough memory then we
  1348. * are "out of memory" and something needs to be killed.
  1349. *
  1350. * If the caller is !__GFP_FS then the probability of a failure is reasonably
  1351. * high - the zone may be full of dirty or under-writeback pages, which this
  1352. * caller can't do much about. We kick pdflush and take explicit naps in the
  1353. * hope that some of these pages can be written. But if the allocating task
  1354. * holds filesystem locks which prevent writeout this might not work, and the
  1355. * allocation attempt will fail.
  1356. *
  1357. * returns: 0, if no pages reclaimed
  1358. * else, the number of pages reclaimed
  1359. */
  1360. static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
  1361. struct scan_control *sc)
  1362. {
  1363. int priority;
  1364. unsigned long ret = 0;
  1365. unsigned long total_scanned = 0;
  1366. unsigned long nr_reclaimed = 0;
  1367. struct reclaim_state *reclaim_state = current->reclaim_state;
  1368. unsigned long lru_pages = 0;
  1369. struct zoneref *z;
  1370. struct zone *zone;
  1371. enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
  1372. delayacct_freepages_start();
  1373. if (scan_global_lru(sc))
  1374. count_vm_event(ALLOCSTALL);
  1375. /*
  1376. * mem_cgroup will not do shrink_slab.
  1377. */
  1378. if (scan_global_lru(sc)) {
  1379. for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
  1380. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  1381. continue;
  1382. lru_pages += zone_lru_pages(zone);
  1383. }
  1384. }
  1385. for (priority = DEF_PRIORITY; priority >= 0; priority--) {
  1386. sc->nr_scanned = 0;
  1387. if (!priority)
  1388. disable_swap_token();
  1389. nr_reclaimed += shrink_zones(priority, zonelist, sc);
  1390. /*
  1391. * Don't shrink slabs when reclaiming memory from
  1392. * over limit cgroups
  1393. */
  1394. if (scan_global_lru(sc)) {
  1395. shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
  1396. if (reclaim_state) {
  1397. nr_reclaimed += reclaim_state->reclaimed_slab;
  1398. reclaim_state->reclaimed_slab = 0;
  1399. }
  1400. }
  1401. total_scanned += sc->nr_scanned;
  1402. if (nr_reclaimed >= sc->swap_cluster_max) {
  1403. ret = nr_reclaimed;
  1404. goto out;
  1405. }
  1406. /*
  1407. * Try to write back as many pages as we just scanned. This
  1408. * tends to cause slow streaming writers to write data to the
  1409. * disk smoothly, at the dirtying rate, which is nice. But
  1410. * that's undesirable in laptop mode, where we *want* lumpy
  1411. * writeout. So in laptop mode, write out the whole world.
  1412. */
  1413. if (total_scanned > sc->swap_cluster_max +
  1414. sc->swap_cluster_max / 2) {
  1415. wakeup_pdflush(laptop_mode ? 0 : total_scanned);
  1416. sc->may_writepage = 1;
  1417. }
  1418. /* Take a nap, wait for some writeback to complete */
  1419. if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
  1420. congestion_wait(WRITE, HZ/10);
  1421. }
  1422. /* top priority shrink_zones still had more to do? don't OOM, then */
  1423. if (!sc->all_unreclaimable && scan_global_lru(sc))
  1424. ret = nr_reclaimed;
  1425. out:
  1426. /*
  1427. * Now that we've scanned all the zones at this priority level, note
  1428. * that level within the zone so that the next thread which performs
  1429. * scanning of this zone will immediately start out at this priority
  1430. * level. This affects only the decision whether or not to bring
  1431. * mapped pages onto the inactive list.
  1432. */
  1433. if (priority < 0)
  1434. priority = 0;
  1435. if (scan_global_lru(sc)) {
  1436. for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
  1437. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  1438. continue;
  1439. zone->prev_priority = priority;
  1440. }
  1441. } else
  1442. mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
  1443. delayacct_freepages_end();
  1444. return ret;
  1445. }
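/*
 * The writeback kick above in numbers (a sketch, assuming
 * swap_cluster_max == SWAP_CLUSTER_MAX == 32): once total_scanned
 * exceeds 32 + 32/2 = 48 pages, pdflush is asked to write back about
 * as many pages as were scanned (or everything, in laptop_mode) and
 * later passes are allowed to call writepage themselves.
 */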
  1446. unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
  1447. gfp_t gfp_mask)
  1448. {
  1449. struct scan_control sc = {
  1450. .gfp_mask = gfp_mask,
  1451. .may_writepage = !laptop_mode,
  1452. .swap_cluster_max = SWAP_CLUSTER_MAX,
  1453. .may_swap = 1,
  1454. .swappiness = vm_swappiness,
  1455. .order = order,
  1456. .mem_cgroup = NULL,
  1457. .isolate_pages = isolate_pages_global,
  1458. };
  1459. return do_try_to_free_pages(zonelist, &sc);
  1460. }
  1461. #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  1462. unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
  1463. gfp_t gfp_mask)
  1464. {
  1465. struct scan_control sc = {
  1466. .may_writepage = !laptop_mode,
  1467. .may_swap = 1,
  1468. .swap_cluster_max = SWAP_CLUSTER_MAX,
  1469. .swappiness = vm_swappiness,
  1470. .order = 0,
  1471. .mem_cgroup = mem_cont,
  1472. .isolate_pages = mem_cgroup_isolate_pages,
  1473. };
  1474. struct zonelist *zonelist;
  1475. sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
  1476. (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
  1477. zonelist = NODE_DATA(numa_node_id())->node_zonelists;
  1478. return do_try_to_free_pages(zonelist, &sc);
  1479. }
  1480. #endif
  1481. /*
  1482. * For kswapd, balance_pgdat() will work across all this node's zones until
  1483. * they are all at pages_high.
  1484. *
  1485. * Returns the number of pages which were actually freed.
  1486. *
  1487. * There is special handling here for zones which are full of pinned pages.
  1488. * This can happen if the pages are all mlocked, or if they are all used by
  1489. * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
  1490. * What we do is to detect the case where all pages in the zone have been
  1491. * scanned twice and there has been zero successful reclaim. Mark the zone as
  1492. * dead and from now on, only perform a short scan. Basically we're polling
  1493. * the zone for when the problem goes away.
  1494. *
  1495. * kswapd scans the zones in the highmem->normal->dma direction. It skips
  1496. * zones which have free_pages > pages_high, but once a zone is found to have
  1497. * free_pages <= pages_high, we scan that zone and the lower zones regardless
  1498. * of the number of free pages in the lower zones. This interoperates with
  1499. * the page allocator fallback scheme to ensure that aging of pages is balanced
  1500. * across the zones.
  1501. */
  1502. static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
  1503. {
  1504. int all_zones_ok;
  1505. int priority;
  1506. int i;
  1507. unsigned long total_scanned;
  1508. unsigned long nr_reclaimed;
  1509. struct reclaim_state *reclaim_state = current->reclaim_state;
  1510. struct scan_control sc = {
  1511. .gfp_mask = GFP_KERNEL,
  1512. .may_swap = 1,
  1513. .swap_cluster_max = SWAP_CLUSTER_MAX,
  1514. .swappiness = vm_swappiness,
  1515. .order = order,
  1516. .mem_cgroup = NULL,
  1517. .isolate_pages = isolate_pages_global,
  1518. };
  1519. /*
  1520. * temp_priority is used to remember the scanning priority at which
  1521. * this zone was successfully refilled to free_pages == pages_high.
  1522. */
  1523. int temp_priority[MAX_NR_ZONES];
  1524. loop_again:
  1525. total_scanned = 0;
  1526. nr_reclaimed = 0;
  1527. sc.may_writepage = !laptop_mode;
  1528. count_vm_event(PAGEOUTRUN);
  1529. for (i = 0; i < pgdat->nr_zones; i++)
  1530. temp_priority[i] = DEF_PRIORITY;
  1531. for (priority = DEF_PRIORITY; priority >= 0; priority--) {
  1532. int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
  1533. unsigned long lru_pages = 0;
  1534. /* The swap token gets in the way of swapout... */
  1535. if (!priority)
  1536. disable_swap_token();
  1537. all_zones_ok = 1;
  1538. /*
  1539. * Scan in the highmem->dma direction for the highest
  1540. * zone which needs scanning
  1541. */
  1542. for (i = pgdat->nr_zones - 1; i >= 0; i--) {
  1543. struct zone *zone = pgdat->node_zones + i;
  1544. if (!populated_zone(zone))
  1545. continue;
  1546. if (zone_is_all_unreclaimable(zone) &&
  1547. priority != DEF_PRIORITY)
  1548. continue;
  1549. /*
  1550. * Do some background aging of the anon list, to give
  1551. * pages a chance to be referenced before reclaiming.
  1552. */
  1553. if (inactive_anon_is_low(zone))
  1554. shrink_active_list(SWAP_CLUSTER_MAX, zone,
  1555. &sc, priority, 0);
  1556. if (!zone_watermark_ok(zone, order, zone->pages_high,
  1557. 0, 0)) {
  1558. end_zone = i;
  1559. break;
  1560. }
  1561. }
  1562. if (i < 0)
  1563. goto out;
  1564. for (i = 0; i <= end_zone; i++) {
  1565. struct zone *zone = pgdat->node_zones + i;
  1566. lru_pages += zone_lru_pages(zone);
  1567. }
  1568. /*
  1569. * Now scan the zone in the dma->highmem direction, stopping
  1570. * at the last zone which needs scanning.
  1571. *
  1572. * We do this because the page allocator works in the opposite
  1573. * direction. This prevents the page allocator from allocating
  1574. * pages behind kswapd's direction of progress, which would
  1575. * cause too much scanning of the lower zones.
  1576. */
  1577. for (i = 0; i <= end_zone; i++) {
  1578. struct zone *zone = pgdat->node_zones + i;
  1579. int nr_slab;
  1580. if (!populated_zone(zone))
  1581. continue;
  1582. if (zone_is_all_unreclaimable(zone) &&
  1583. priority != DEF_PRIORITY)
  1584. continue;
  1585. if (!zone_watermark_ok(zone, order, zone->pages_high,
  1586. end_zone, 0))
  1587. all_zones_ok = 0;
  1588. temp_priority[i] = priority;
  1589. sc.nr_scanned = 0;
  1590. note_zone_scanning_priority(zone, priority);
  1591. /*
  1592. * We put equal pressure on every zone, unless one
  1593. * zone has way too many pages free already.
  1594. */
  1595. if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
  1596. end_zone, 0))
  1597. nr_reclaimed += shrink_zone(priority, zone, &sc);
  1598. reclaim_state->reclaimed_slab = 0;
  1599. nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
  1600. lru_pages);
  1601. nr_reclaimed += reclaim_state->reclaimed_slab;
  1602. total_scanned += sc.nr_scanned;
  1603. if (zone_is_all_unreclaimable(zone))
  1604. continue;
  1605. if (nr_slab == 0 && zone->pages_scanned >=
  1606. (zone_lru_pages(zone) * 6))
  1607. zone_set_flag(zone,
  1608. ZONE_ALL_UNRECLAIMABLE);
  1609. /*
  1610. * If we've done a decent amount of scanning and
  1611. * the reclaim ratio is low, start doing writepage
  1612. * even in laptop mode
  1613. */
  1614. if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
  1615. total_scanned > nr_reclaimed + nr_reclaimed / 2)
  1616. sc.may_writepage = 1;
  1617. }
  1618. if (all_zones_ok)
  1619. break; /* kswapd: all done */
  1620. /*
  1621. * OK, kswapd is getting into trouble. Take a nap, then take
  1622. * another pass across the zones.
  1623. */
  1624. if (total_scanned && priority < DEF_PRIORITY - 2)
  1625. congestion_wait(WRITE, HZ/10);
  1626. /*
  1627. * We do this so kswapd doesn't build up large priorities for
  1628. * example when it is freeing in parallel with allocators. It
  1629. * matches the direct reclaim path behaviour in terms of impact
  1630. * on zone->*_priority.
  1631. */
  1632. if (nr_reclaimed >= SWAP_CLUSTER_MAX)
  1633. break;
  1634. }
  1635. out:
  1636. /*
  1637. * Note within each zone the priority level at which this zone was
1638. * brought into a happy state, so that the next thread which scans this
  1639. * zone will start out at that priority level.
  1640. */
  1641. for (i = 0; i < pgdat->nr_zones; i++) {
  1642. struct zone *zone = pgdat->node_zones + i;
  1643. zone->prev_priority = temp_priority[i];
  1644. }
  1645. if (!all_zones_ok) {
  1646. cond_resched();
  1647. try_to_freeze();
  1648. goto loop_again;
  1649. }
  1650. return nr_reclaimed;
  1651. }
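/*
 * The ZONE_ALL_UNRECLAIMABLE test above in numbers (a sketch): for a
 * zone with 10,000 LRU pages the flag is set once zone->pages_scanned
 * reaches 60,000 (the zone scanned roughly six times over) while the
 * last shrink_slab() call freed nothing.  From then on the zone is
 * skipped except at DEF_PRIORITY, per the checks earlier in this
 * function.
 */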
  1652. /*
  1653. * The background pageout daemon, started as a kernel thread
  1654. * from the init process.
  1655. *
  1656. * This basically trickles out pages so that we have _some_
  1657. * free memory available even if there is no other activity
  1658. * that frees anything up. This is needed for things like routing
  1659. * etc, where we otherwise might have all activity going on in
  1660. * asynchronous contexts that cannot page things out.
  1661. *
  1662. * If there are applications that are active memory-allocators
  1663. * (most normal use), this basically shouldn't matter.
  1664. */
  1665. static int kswapd(void *p)
  1666. {
  1667. unsigned long order;
  1668. pg_data_t *pgdat = (pg_data_t*)p;
  1669. struct task_struct *tsk = current;
  1670. DEFINE_WAIT(wait);
  1671. struct reclaim_state reclaim_state = {
  1672. .reclaimed_slab = 0,
  1673. };
  1674. node_to_cpumask_ptr(cpumask, pgdat->node_id);
  1675. if (!cpumask_empty(cpumask))
  1676. set_cpus_allowed_ptr(tsk, cpumask);
  1677. current->reclaim_state = &reclaim_state;
  1678. /*
  1679. * Tell the memory management that we're a "memory allocator",
  1680. * and that if we need more memory we should get access to it
  1681. * regardless (see "__alloc_pages()"). "kswapd" should
  1682. * never get caught in the normal page freeing logic.
  1683. *
  1684. * (Kswapd normally doesn't need memory anyway, but sometimes
  1685. * you need a small amount of memory in order to be able to
  1686. * page out something else, and this flag essentially protects
  1687. * us from recursively trying to free more memory as we're
  1688. * trying to free the first piece of memory in the first place).
  1689. */
  1690. tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
  1691. set_freezable();
  1692. order = 0;
  1693. for ( ; ; ) {
  1694. unsigned long new_order;
  1695. prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
  1696. new_order = pgdat->kswapd_max_order;
  1697. pgdat->kswapd_max_order = 0;
  1698. if (order < new_order) {
  1699. /*
  1700. * Don't sleep if someone wants a larger 'order'
  1701. * allocation
  1702. */
  1703. order = new_order;
  1704. } else {
  1705. if (!freezing(current))
  1706. schedule();
  1707. order = pgdat->kswapd_max_order;
  1708. }
  1709. finish_wait(&pgdat->kswapd_wait, &wait);
  1710. if (!try_to_freeze()) {
  1711. /* We can speed up thawing tasks if we don't call
  1712. * balance_pgdat after returning from the refrigerator
  1713. */
  1714. balance_pgdat(pgdat, order);
  1715. }
  1716. }
  1717. return 0;
  1718. }
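/*
 * Order handling above, by example (a sketch): if the previous
 * balance_pgdat() ran for order 0 and wakeup_kswapd() has since
 * recorded an order-3 request in pgdat->kswapd_max_order, kswapd skips
 * the sleep entirely and rebalances at order 3; otherwise it sleeps
 * and picks up whatever order was requested while it slept.
 */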
  1719. /*
  1720. * A zone is low on free memory, so wake its kswapd task to service it.
  1721. */
  1722. void wakeup_kswapd(struct zone *zone, int order)
  1723. {
  1724. pg_data_t *pgdat;
  1725. if (!populated_zone(zone))
  1726. return;
  1727. pgdat = zone->zone_pgdat;
  1728. if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
  1729. return;
  1730. if (pgdat->kswapd_max_order < order)
  1731. pgdat->kswapd_max_order = order;
  1732. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  1733. return;
  1734. if (!waitqueue_active(&pgdat->kswapd_wait))
  1735. return;
  1736. wake_up_interruptible(&pgdat->kswapd_wait);
  1737. }
  1738. unsigned long global_lru_pages(void)
  1739. {
  1740. return global_page_state(NR_ACTIVE_ANON)
  1741. + global_page_state(NR_ACTIVE_FILE)
  1742. + global_page_state(NR_INACTIVE_ANON)
  1743. + global_page_state(NR_INACTIVE_FILE);
  1744. }
  1745. #ifdef CONFIG_PM
  1746. /*
  1747. * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
  1748. * from LRU lists system-wide, for given pass and priority, and returns the
1749. * number of reclaimed pages.
1750. *
1751. * For pass > 3 we also try to shrink the LRU lists that contain only a few pages.
  1752. */
  1753. static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
  1754. int pass, struct scan_control *sc)
  1755. {
  1756. struct zone *zone;
  1757. unsigned long nr_to_scan, ret = 0;
  1758. enum lru_list l;
  1759. for_each_zone(zone) {
  1760. if (!populated_zone(zone))
  1761. continue;
  1762. if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
  1763. continue;
  1764. for_each_evictable_lru(l) {
  1765. /* For pass = 0, we don't shrink the active list */
  1766. if (pass == 0 &&
  1767. (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
  1768. continue;
  1769. zone->lru[l].nr_scan +=
  1770. (zone_page_state(zone, NR_LRU_BASE + l)
  1771. >> prio) + 1;
  1772. if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
  1773. zone->lru[l].nr_scan = 0;
  1774. nr_to_scan = min(nr_pages,
  1775. zone_page_state(zone,
  1776. NR_LRU_BASE + l));
  1777. ret += shrink_list(l, nr_to_scan, zone,
  1778. sc, prio);
  1779. if (ret >= nr_pages)
  1780. return ret;
  1781. }
  1782. }
  1783. }
  1784. return ret;
  1785. }
  1786. /*
  1787. * Try to free `nr_pages' of memory, system-wide, and return the number of
  1788. * freed pages.
  1789. *
  1790. * Rather than trying to age LRUs the aim is to preserve the overall
  1791. * LRU order by reclaiming preferentially
  1792. * inactive > active > active referenced > active mapped
  1793. */
  1794. unsigned long shrink_all_memory(unsigned long nr_pages)
  1795. {
  1796. unsigned long lru_pages, nr_slab;
  1797. unsigned long ret = 0;
  1798. int pass;
  1799. struct reclaim_state reclaim_state;
  1800. struct scan_control sc = {
  1801. .gfp_mask = GFP_KERNEL,
  1802. .may_swap = 0,
  1803. .swap_cluster_max = nr_pages,
  1804. .may_writepage = 1,
  1805. .swappiness = vm_swappiness,
  1806. .isolate_pages = isolate_pages_global,
  1807. };
  1808. current->reclaim_state = &reclaim_state;
  1809. lru_pages = global_lru_pages();
  1810. nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
  1811. /* If slab caches are huge, it's better to hit them first */
  1812. while (nr_slab >= lru_pages) {
  1813. reclaim_state.reclaimed_slab = 0;
  1814. shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
  1815. if (!reclaim_state.reclaimed_slab)
  1816. break;
  1817. ret += reclaim_state.reclaimed_slab;
  1818. if (ret >= nr_pages)
  1819. goto out;
  1820. nr_slab -= reclaim_state.reclaimed_slab;
  1821. }
  1822. /*
  1823. * We try to shrink LRUs in 5 passes:
  1824. * 0 = Reclaim from inactive_list only
  1825. * 1 = Reclaim from active list but don't reclaim mapped
  1826. * 2 = 2nd pass of type 1
  1827. * 3 = Reclaim mapped (normal reclaim)
  1828. * 4 = 2nd pass of type 3
  1829. */
  1830. for (pass = 0; pass < 5; pass++) {
  1831. int prio;
  1832. /* Force reclaiming mapped pages in the passes #3 and #4 */
  1833. if (pass > 2) {
  1834. sc.may_swap = 1;
  1835. sc.swappiness = 100;
  1836. }
  1837. for (prio = DEF_PRIORITY; prio >= 0; prio--) {
  1838. unsigned long nr_to_scan = nr_pages - ret;
  1839. sc.nr_scanned = 0;
  1840. ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
  1841. if (ret >= nr_pages)
  1842. goto out;
  1843. reclaim_state.reclaimed_slab = 0;
  1844. shrink_slab(sc.nr_scanned, sc.gfp_mask,
  1845. global_lru_pages());
  1846. ret += reclaim_state.reclaimed_slab;
  1847. if (ret >= nr_pages)
  1848. goto out;
  1849. if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
  1850. congestion_wait(WRITE, HZ / 10);
  1851. }
  1852. }
  1853. /*
  1854. * If ret = 0, we could not shrink LRUs, but there may be something
  1855. * in slab caches
  1856. */
  1857. if (!ret) {
  1858. do {
  1859. reclaim_state.reclaimed_slab = 0;
  1860. shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
  1861. ret += reclaim_state.reclaimed_slab;
  1862. } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
  1863. }
  1864. out:
  1865. current->reclaim_state = NULL;
  1866. return ret;
  1867. }
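/*
 * Pass progression above, in short (a sketch): passes 0-2 leave
 * sc.may_swap at 0, while passes 3 and 4 switch to may_swap = 1 with
 * swappiness forced to 100, so mapped and anon pages are reclaimed as
 * aggressively as page cache.  Within each pass the priority walks
 * from DEF_PRIORITY down to 0, exactly as in the other reclaim paths.
 */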
  1868. #endif
  1869. /* It's optimal to keep kswapds on the same CPUs as their memory, but
  1870. not required for correctness. So if the last cpu in a node goes
  1871. away, we get changed to run anywhere: as the first one comes back,
  1872. restore their cpu bindings. */
  1873. static int __devinit cpu_callback(struct notifier_block *nfb,
  1874. unsigned long action, void *hcpu)
  1875. {
  1876. int nid;
  1877. if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
  1878. for_each_node_state(nid, N_HIGH_MEMORY) {
  1879. pg_data_t *pgdat = NODE_DATA(nid);
  1880. node_to_cpumask_ptr(mask, pgdat->node_id);
  1881. if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
  1882. /* One of our CPUs online: restore mask */
  1883. set_cpus_allowed_ptr(pgdat->kswapd, mask);
  1884. }
  1885. }
  1886. return NOTIFY_OK;
  1887. }
  1888. /*
  1889. * This kswapd start function will be called by init and node-hot-add.
1890. * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
  1891. */
  1892. int kswapd_run(int nid)
  1893. {
  1894. pg_data_t *pgdat = NODE_DATA(nid);
  1895. int ret = 0;
  1896. if (pgdat->kswapd)
  1897. return 0;
  1898. pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
  1899. if (IS_ERR(pgdat->kswapd)) {
  1900. /* failure at boot is fatal */
  1901. BUG_ON(system_state == SYSTEM_BOOTING);
1902. printk("Failed to start kswapd on node %d\n", nid);
  1903. ret = -1;
  1904. }
  1905. return ret;
  1906. }
  1907. static int __init kswapd_init(void)
  1908. {
  1909. int nid;
  1910. swap_setup();
  1911. for_each_node_state(nid, N_HIGH_MEMORY)
  1912. kswapd_run(nid);
  1913. hotcpu_notifier(cpu_callback, 0);
  1914. return 0;
  1915. }
  1916. module_init(kswapd_init)
  1917. #ifdef CONFIG_NUMA
  1918. /*
  1919. * Zone reclaim mode
  1920. *
  1921. * If non-zero call zone_reclaim when the number of free pages falls below
  1922. * the watermarks.
  1923. */
  1924. int zone_reclaim_mode __read_mostly;
  1925. #define RECLAIM_OFF 0
  1926. #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
  1927. #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
  1928. #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
  1929. /*
  1930. * Priority for ZONE_RECLAIM. This determines the fraction of pages
  1931. * of a node considered for each zone_reclaim. 4 scans 1/16th of
  1932. * a zone.
  1933. */
  1934. #define ZONE_RECLAIM_PRIORITY 4
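/*
 * shrink_zone() shifts the LRU size right by the priority, so
 * ZONE_RECLAIM_PRIORITY 4 means each __zone_reclaim() pass starts by
 * scanning roughly 1/16th (1 << 4) of the zone's pages (before the
 * anon/file percentage split), halving that divisor on every retry
 * until enough pages have been reclaimed.
 */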
  1935. /*
  1936. * Percentage of pages in a zone that must be unmapped for zone_reclaim to
  1937. * occur.
  1938. */
  1939. int sysctl_min_unmapped_ratio = 1;
  1940. /*
  1941. * If the number of slab pages in a zone grows beyond this percentage then
  1942. * slab reclaim needs to occur.
  1943. */
  1944. int sysctl_min_slab_ratio = 5;
  1945. /*
  1946. * Try to free up some pages from this zone through reclaim.
  1947. */
  1948. static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
  1949. {
  1950. /* Minimum pages needed in order to stay on node */
  1951. const unsigned long nr_pages = 1 << order;
  1952. struct task_struct *p = current;
  1953. struct reclaim_state reclaim_state;
  1954. int priority;
  1955. unsigned long nr_reclaimed = 0;
  1956. struct scan_control sc = {
  1957. .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
  1958. .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
  1959. .swap_cluster_max = max_t(unsigned long, nr_pages,
  1960. SWAP_CLUSTER_MAX),
  1961. .gfp_mask = gfp_mask,
  1962. .swappiness = vm_swappiness,
  1963. .isolate_pages = isolate_pages_global,
  1964. };
  1965. unsigned long slab_reclaimable;
  1966. disable_swap_token();
  1967. cond_resched();
  1968. /*
  1969. * We need to be able to allocate from the reserves for RECLAIM_SWAP
  1970. * and we also need to be able to write out pages for RECLAIM_WRITE
  1971. * and RECLAIM_SWAP.
  1972. */
  1973. p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
  1974. reclaim_state.reclaimed_slab = 0;
  1975. p->reclaim_state = &reclaim_state;
  1976. if (zone_page_state(zone, NR_FILE_PAGES) -
  1977. zone_page_state(zone, NR_FILE_MAPPED) >
  1978. zone->min_unmapped_pages) {
  1979. /*
  1980. * Free memory by calling shrink zone with increasing
  1981. * priorities until we have enough memory freed.
  1982. */
  1983. priority = ZONE_RECLAIM_PRIORITY;
  1984. do {
  1985. note_zone_scanning_priority(zone, priority);
  1986. nr_reclaimed += shrink_zone(priority, zone, &sc);
  1987. priority--;
  1988. } while (priority >= 0 && nr_reclaimed < nr_pages);
  1989. }
  1990. slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
  1991. if (slab_reclaimable > zone->min_slab_pages) {
  1992. /*
  1993. * shrink_slab() does not currently allow us to determine how
  1994. * many pages were freed in this zone. So we take the current
  1995. * number of slab pages and shake the slab until it is reduced
  1996. * by the same nr_pages that we used for reclaiming unmapped
  1997. * pages.
  1998. *
  1999. * Note that shrink_slab will free memory on all zones and may
  2000. * take a long time.
  2001. */
  2002. while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
  2003. zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
  2004. slab_reclaimable - nr_pages)
  2005. ;
  2006. /*
  2007. * Update nr_reclaimed by the number of slab pages we
  2008. * reclaimed from this zone.
  2009. */
  2010. nr_reclaimed += slab_reclaimable -
  2011. zone_page_state(zone, NR_SLAB_RECLAIMABLE);
  2012. }
  2013. p->reclaim_state = NULL;
  2014. current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
  2015. return nr_reclaimed >= nr_pages;
  2016. }
  2017. int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
  2018. {
  2019. int node_id;
  2020. int ret;
  2021. /*
  2022. * Zone reclaim reclaims unmapped file backed pages and
  2023. * slab pages if we are over the defined limits.
  2024. *
  2025. * A small portion of unmapped file backed pages is needed for
  2026. * file I/O otherwise pages read by file I/O will be immediately
  2027. * thrown out if the zone is overallocated. So we do not reclaim
  2028. * if less than a specified percentage of the zone is used by
  2029. * unmapped file backed pages.
  2030. */
  2031. if (zone_page_state(zone, NR_FILE_PAGES) -
  2032. zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
  2033. && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
  2034. <= zone->min_slab_pages)
  2035. return 0;
  2036. if (zone_is_all_unreclaimable(zone))
  2037. return 0;
  2038. /*
  2039. * Do not scan if the allocation should not be delayed.
  2040. */
  2041. if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
  2042. return 0;
  2043. /*
  2044. * Only run zone reclaim on the local zone or on zones that do not
  2045. * have associated processors. This will favor the local processor
  2046. * over remote processors and spread off node memory allocations
  2047. * as wide as possible.
  2048. */
  2049. node_id = zone_to_nid(zone);
  2050. if (node_state(node_id, N_CPU) && node_id != numa_node_id())
  2051. return 0;
  2052. if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
  2053. return 0;
  2054. ret = __zone_reclaim(zone, gfp_mask, order);
  2055. zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
  2056. return ret;
  2057. }
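/*
 * Numeric sketch of the early exit above (assuming min_unmapped_pages
 * and min_slab_pages are derived from these ratios as a percentage of
 * the zone): with the defaults (sysctl_min_unmapped_ratio = 1,
 * sysctl_min_slab_ratio = 5) a 1,000,000-page zone is skipped only
 * while it has no more than roughly 10,000 unmapped file-backed pages
 * and no more than roughly 50,000 reclaimable slab pages; exceeding
 * either limit lets __zone_reclaim() run.
 */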
  2058. #endif
  2059. #ifdef CONFIG_UNEVICTABLE_LRU
  2060. /*
  2061. * page_evictable - test whether a page is evictable
  2062. * @page: the page to test
  2063. * @vma: the VMA in which the page is or will be mapped, may be NULL
  2064. *
  2065. * Test whether page is evictable--i.e., should be placed on active/inactive
  2066. * lists vs unevictable list. The vma argument is !NULL when called from the
2067. * fault path to determine how to instantiate a new page.
  2068. *
  2069. * Reasons page might not be evictable:
  2070. * (1) page's mapping marked unevictable
  2071. * (2) page is part of an mlocked VMA
  2072. *
  2073. */
  2074. int page_evictable(struct page *page, struct vm_area_struct *vma)
  2075. {
  2076. if (mapping_unevictable(page_mapping(page)))
  2077. return 0;
  2078. if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
  2079. return 0;
  2080. return 1;
  2081. }
  2082. /**
  2083. * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
  2084. * @page: page to check evictability and move to appropriate lru list
  2085. * @zone: zone page is in
  2086. *
  2087. * Checks a page for evictability and moves the page to the appropriate
  2088. * zone lru list.
  2089. *
  2090. * Restrictions: zone->lru_lock must be held, page must be on LRU and must
  2091. * have PageUnevictable set.
  2092. */
  2093. static void check_move_unevictable_page(struct page *page, struct zone *zone)
  2094. {
  2095. VM_BUG_ON(PageActive(page));
  2096. retry:
  2097. ClearPageUnevictable(page);
  2098. if (page_evictable(page, NULL)) {
  2099. enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
  2100. __dec_zone_state(zone, NR_UNEVICTABLE);
  2101. list_move(&page->lru, &zone->lru[l].list);
  2102. __inc_zone_state(zone, NR_INACTIVE_ANON + l);
  2103. __count_vm_event(UNEVICTABLE_PGRESCUED);
  2104. } else {
  2105. /*
  2106. * rotate unevictable list
  2107. */
  2108. SetPageUnevictable(page);
  2109. list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
  2110. if (page_evictable(page, NULL))
  2111. goto retry;
  2112. }
  2113. }
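/*
 * The retry above closes a small race: if the page becomes evictable
 * again between the page_evictable() check and the move back onto the
 * unevictable list, rechecking and looping makes sure it still ends up
 * on an evictable LRU instead of being stranded.
 */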
  2114. /**
  2115. * scan_mapping_unevictable_pages - scan an address space for evictable pages
  2116. * @mapping: struct address_space to scan for evictable pages
  2117. *
  2118. * Scan all pages in mapping. Check unevictable pages for
  2119. * evictability and move them to the appropriate zone lru list.
  2120. */
  2121. void scan_mapping_unevictable_pages(struct address_space *mapping)
  2122. {
  2123. pgoff_t next = 0;
  2124. pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
  2125. PAGE_CACHE_SHIFT;
  2126. struct zone *zone;
  2127. struct pagevec pvec;
  2128. if (mapping->nrpages == 0)
  2129. return;
  2130. pagevec_init(&pvec, 0);
  2131. while (next < end &&
  2132. pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
  2133. int i;
  2134. int pg_scanned = 0;
  2135. zone = NULL;
  2136. for (i = 0; i < pagevec_count(&pvec); i++) {
  2137. struct page *page = pvec.pages[i];
  2138. pgoff_t page_index = page->index;
  2139. struct zone *pagezone = page_zone(page);
  2140. pg_scanned++;
  2141. if (page_index > next)
  2142. next = page_index;
  2143. next++;
  2144. if (pagezone != zone) {
  2145. if (zone)
  2146. spin_unlock_irq(&zone->lru_lock);
  2147. zone = pagezone;
  2148. spin_lock_irq(&zone->lru_lock);
  2149. }
  2150. if (PageLRU(page) && PageUnevictable(page))
  2151. check_move_unevictable_page(page, zone);
  2152. }
  2153. if (zone)
  2154. spin_unlock_irq(&zone->lru_lock);
  2155. pagevec_release(&pvec);
  2156. count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
  2157. }
  2158. }
  2159. /**
  2160. * scan_zone_unevictable_pages - check unevictable list for evictable pages
2161. * @zone: zone of which to scan the unevictable list
  2162. *
  2163. * Scan @zone's unevictable LRU lists to check for pages that have become
  2164. * evictable. Move those that have to @zone's inactive list where they
  2165. * become candidates for reclaim, unless shrink_inactive_zone() decides
  2166. * to reactivate them. Pages that are still unevictable are rotated
  2167. * back onto @zone's unevictable list.
  2168. */
  2169. #define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
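/*
 * Batching sketch: with 1,000 pages on a zone's unevictable list and a
 * batch size of 16, scan_zone_unevictable_pages() below takes and
 * releases zone->lru_lock about 63 times rather than holding it across
 * the whole scan.
 */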
  2170. void scan_zone_unevictable_pages(struct zone *zone)
  2171. {
  2172. struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
  2173. unsigned long scan;
  2174. unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
  2175. while (nr_to_scan > 0) {
  2176. unsigned long batch_size = min(nr_to_scan,
  2177. SCAN_UNEVICTABLE_BATCH_SIZE);
  2178. spin_lock_irq(&zone->lru_lock);
  2179. for (scan = 0; scan < batch_size; scan++) {
  2180. struct page *page = lru_to_page(l_unevictable);
  2181. if (!trylock_page(page))
  2182. continue;
  2183. prefetchw_prev_lru_page(page, l_unevictable, flags);
  2184. if (likely(PageLRU(page) && PageUnevictable(page)))
  2185. check_move_unevictable_page(page, zone);
  2186. unlock_page(page);
  2187. }
  2188. spin_unlock_irq(&zone->lru_lock);
  2189. nr_to_scan -= batch_size;
  2190. }
  2191. }
  2192. /**
  2193. * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
  2194. *
  2195. * A really big hammer: scan all zones' unevictable LRU lists to check for
  2196. * pages that have become evictable. Move those back to the zones'
  2197. * inactive list where they become candidates for reclaim.
  2198. * This occurs when, e.g., we have unswappable pages on the unevictable lists,
  2199. * and we add swap to the system. As such, it runs in the context of a task
  2200. * that has possibly/probably made some previously unevictable pages
  2201. * evictable.
  2202. */
  2203. void scan_all_zones_unevictable_pages(void)
  2204. {
  2205. struct zone *zone;
  2206. for_each_zone(zone) {
  2207. scan_zone_unevictable_pages(zone);
  2208. }
  2209. }
  2210. /*
  2211. * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
  2212. * all nodes' unevictable lists for evictable pages
  2213. */
  2214. unsigned long scan_unevictable_pages;
  2215. int scan_unevictable_handler(struct ctl_table *table, int write,
  2216. struct file *file, void __user *buffer,
  2217. size_t *length, loff_t *ppos)
  2218. {
  2219. proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
  2220. if (write && *(unsigned long *)table->data)
  2221. scan_all_zones_unevictable_pages();
  2222. scan_unevictable_pages = 0;
  2223. return 0;
  2224. }
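/*
 * Usage sketch (assuming the handler is wired up under /proc/sys/vm,
 * as the [vm] tag above suggests):
 *
 *	echo 1 > /proc/sys/vm/scan_unevictable_pages
 *
 * Writing any non-zero value triggers a one-shot rescan of every
 * zone's unevictable list; the value itself is not retained.
 */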
  2225. /*
  2226. * per node 'scan_unevictable_pages' attribute. On demand re-scan of
  2227. * a specified node's per zone unevictable lists for evictable pages.
  2228. */
  2229. static ssize_t read_scan_unevictable_node(struct sys_device *dev,
  2230. struct sysdev_attribute *attr,
  2231. char *buf)
  2232. {
  2233. return sprintf(buf, "0\n"); /* always zero; should fit... */
  2234. }
  2235. static ssize_t write_scan_unevictable_node(struct sys_device *dev,
  2236. struct sysdev_attribute *attr,
  2237. const char *buf, size_t count)
  2238. {
  2239. struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
  2240. struct zone *zone;
  2241. unsigned long res;
2242. int err = strict_strtoul(buf, 10, &res);
2243. if (err || !res)
2244. return 1; /* a parse error or zero is a no-op */
  2245. for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
  2246. if (!populated_zone(zone))
  2247. continue;
  2248. scan_zone_unevictable_pages(zone);
  2249. }
  2250. return 1;
  2251. }
  2252. static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
  2253. read_scan_unevictable_node,
  2254. write_scan_unevictable_node);
  2255. int scan_unevictable_register_node(struct node *node)
  2256. {
  2257. return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
  2258. }
  2259. void scan_unevictable_unregister_node(struct node *node)
  2260. {
  2261. sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
  2262. }
  2263. #endif