compaction.c

  1. /*
  2. * linux/mm/compaction.c
  3. *
  4. * Memory compaction for the reduction of external fragmentation. Note that
  5. * this heavily depends upon page migration to do all the real heavy
  6. * lifting.
  7. *
  8. * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
  9. */
  10. #include <linux/cpu.h>
  11. #include <linux/swap.h>
  12. #include <linux/migrate.h>
  13. #include <linux/compaction.h>
  14. #include <linux/mm_inline.h>
  15. #include <linux/backing-dev.h>
  16. #include <linux/sysctl.h>
  17. #include <linux/sysfs.h>
  18. #include <linux/page-isolation.h>
  19. #include <linux/kasan.h>
  20. #include <linux/kthread.h>
  21. #include <linux/freezer.h>
  22. #include <linux/page_owner.h>
  23. #include "internal.h"
  24. #ifdef CONFIG_COMPACTION
  25. static inline void count_compact_event(enum vm_event_item item)
  26. {
  27. count_vm_event(item);
  28. }
  29. static inline void count_compact_events(enum vm_event_item item, long delta)
  30. {
  31. count_vm_events(item, delta);
  32. }
  33. #else
  34. #define count_compact_event(item) do { } while (0)
  35. #define count_compact_events(item, delta) do { } while (0)
  36. #endif
  37. #if defined CONFIG_COMPACTION || defined CONFIG_CMA
  38. #define CREATE_TRACE_POINTS
  39. #include <trace/events/compaction.h>
  40. #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
  41. #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
  42. #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
  43. #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
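/*
 * A worked example of the rounding macros above, assuming pageblock_order == 9
 * (512 pages per pageblock, typical for x86-64 with 4 KB pages):
 *
 *   pageblock_start_pfn(10000) == round_down(10000, 512) == 9728
 *   pageblock_end_pfn(10000)   == ALIGN(10001, 512)      == 10240
 *
 * so any pfn maps to a half-open pageblock range, here [9728, 10240).
 */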
  44. static unsigned long release_freepages(struct list_head *freelist)
  45. {
  46. struct page *page, *next;
  47. unsigned long high_pfn = 0;
  48. list_for_each_entry_safe(page, next, freelist, lru) {
  49. unsigned long pfn = page_to_pfn(page);
  50. list_del(&page->lru);
  51. __free_page(page);
  52. if (pfn > high_pfn)
  53. high_pfn = pfn;
  54. }
  55. return high_pfn;
  56. }
  57. static void map_pages(struct list_head *list)
  58. {
  59. unsigned int i, order, nr_pages;
  60. struct page *page, *next;
  61. LIST_HEAD(tmp_list);
  62. list_for_each_entry_safe(page, next, list, lru) {
  63. list_del(&page->lru);
  64. order = page_private(page);
  65. nr_pages = 1 << order;
  66. post_alloc_hook(page, order, __GFP_MOVABLE);
  67. if (order)
  68. split_page(page, order);
  69. for (i = 0; i < nr_pages; i++) {
  70. list_add(&page->lru, &tmp_list);
  71. page++;
  72. }
  73. }
  74. list_splice(&tmp_list, list);
  75. }
  76. static inline bool migrate_async_suitable(int migratetype)
  77. {
  78. return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
  79. }
  80. #ifdef CONFIG_COMPACTION
  81. int PageMovable(struct page *page)
  82. {
  83. struct address_space *mapping;
  84. VM_BUG_ON_PAGE(!PageLocked(page), page);
  85. if (!__PageMovable(page))
  86. return 0;
  87. mapping = page_mapping(page);
  88. if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
  89. return 1;
  90. return 0;
  91. }
  92. EXPORT_SYMBOL(PageMovable);
  93. void __SetPageMovable(struct page *page, struct address_space *mapping)
  94. {
  95. VM_BUG_ON_PAGE(!PageLocked(page), page);
  96. VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
  97. page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
  98. }
  99. EXPORT_SYMBOL(__SetPageMovable);
  100. void __ClearPageMovable(struct page *page)
  101. {
  102. VM_BUG_ON_PAGE(!PageLocked(page), page);
  103. VM_BUG_ON_PAGE(!PageMovable(page), page);
  104. /*
  105. * Clear the registered address_space value while keeping the
  106. * PAGE_MAPPING_MOVABLE flag, so that the VM can recognise a page the
  107. * driver has released after isolation and not try to put it back.
  108. */
  109. page->mapping = (void *)((unsigned long)page->mapping &
  110. PAGE_MAPPING_MOVABLE);
  111. }
  112. EXPORT_SYMBOL(__ClearPageMovable);
  113. /* Do not skip compaction more than 64 times */
  114. #define COMPACT_MAX_DEFER_SHIFT 6
  115. /*
  116. * Compaction is deferred when compaction fails to result in a page
  117. * allocation success. 1 << compact_defer_shift compactions are skipped up
  118. * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
  119. */
  120. void defer_compaction(struct zone *zone, int order)
  121. {
  122. zone->compact_considered = 0;
  123. zone->compact_defer_shift++;
  124. if (order < zone->compact_order_failed)
  125. zone->compact_order_failed = order;
  126. if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
  127. zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
  128. trace_mm_compaction_defer_compaction(zone, order);
  129. }
  130. /* Returns true if compaction should be skipped this time */
  131. bool compaction_deferred(struct zone *zone, int order)
  132. {
  133. unsigned long defer_limit = 1UL << zone->compact_defer_shift;
  134. if (order < zone->compact_order_failed)
  135. return false;
  136. /* Avoid possible overflow */
  137. if (++zone->compact_considered > defer_limit)
  138. zone->compact_considered = defer_limit;
  139. if (zone->compact_considered >= defer_limit)
  140. return false;
  141. trace_mm_compaction_deferred(zone, order);
  142. return true;
  143. }
  144. /*
  145. * Update defer tracking counters after successful compaction of given order,
  146. * which means an allocation either succeeded (alloc_success == true) or is
  147. * expected to succeed.
  148. */
  149. void compaction_defer_reset(struct zone *zone, int order,
  150. bool alloc_success)
  151. {
  152. if (alloc_success) {
  153. zone->compact_considered = 0;
  154. zone->compact_defer_shift = 0;
  155. }
  156. if (order >= zone->compact_order_failed)
  157. zone->compact_order_failed = order + 1;
  158. trace_mm_compaction_defer_reset(zone, order);
  159. }
  160. /* Returns true if restarting compaction after many failures */
  161. bool compaction_restarting(struct zone *zone, int order)
  162. {
  163. if (order < zone->compact_order_failed)
  164. return false;
  165. return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
  166. zone->compact_considered >= 1UL << zone->compact_defer_shift;
  167. }
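/*
 * An illustrative sketch of how the deferral backoff above behaves from a
 * caller's point of view. The helper below is hypothetical and for exposition
 * only; it is not part of the kernel's compaction logic.
 */
#if 0
static void example_defer_cycle(struct zone *zone, int order)
{
	defer_compaction(zone, order);	/* compact_defer_shift: 0 -> 1, window = 2 */
	defer_compaction(zone, order);	/* compact_defer_shift: 1 -> 2, window = 4 */

	/*
	 * compaction_deferred() now returns true for the next three calls
	 * ((1 << compact_defer_shift) - 1 of them) and false afterwards, so
	 * requests are skipped until the window elapses. COMPACT_MAX_DEFER_SHIFT
	 * caps the window at 1 << 6 requests.
	 */
	while (compaction_deferred(zone, order))
		cond_resched();		/* a real caller would back off here */

	/* a compaction that satisfies the allocation clears the backoff */
	compaction_defer_reset(zone, order, true);
}
#endif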
  168. /* Returns true if the pageblock should be scanned for pages to isolate. */
  169. static inline bool isolation_suitable(struct compact_control *cc,
  170. struct page *page)
  171. {
  172. if (cc->ignore_skip_hint)
  173. return true;
  174. return !get_pageblock_skip(page);
  175. }
  176. static void reset_cached_positions(struct zone *zone)
  177. {
  178. zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
  179. zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
  180. zone->compact_cached_free_pfn =
  181. pageblock_start_pfn(zone_end_pfn(zone) - 1);
  182. }
  183. /*
  184. * This function is called to clear all cached information on pageblocks that
  185. * should be skipped for page isolation when the migrate and free page scanner
  186. * meet.
  187. */
  188. static void __reset_isolation_suitable(struct zone *zone)
  189. {
  190. unsigned long start_pfn = zone->zone_start_pfn;
  191. unsigned long end_pfn = zone_end_pfn(zone);
  192. unsigned long pfn;
  193. zone->compact_blockskip_flush = false;
  194. /* Walk the zone and mark every pageblock as suitable for isolation */
  195. for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  196. struct page *page;
  197. cond_resched();
  198. if (!pfn_valid(pfn))
  199. continue;
  200. page = pfn_to_page(pfn);
  201. if (zone != page_zone(page))
  202. continue;
  203. clear_pageblock_skip(page);
  204. }
  205. reset_cached_positions(zone);
  206. }
  207. void reset_isolation_suitable(pg_data_t *pgdat)
  208. {
  209. int zoneid;
  210. for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
  211. struct zone *zone = &pgdat->node_zones[zoneid];
  212. if (!populated_zone(zone))
  213. continue;
  214. /* Only flush if a full compaction finished recently */
  215. if (zone->compact_blockskip_flush)
  216. __reset_isolation_suitable(zone);
  217. }
  218. }
  219. /*
  220. * If no pages were isolated then mark this pageblock to be skipped in the
  221. * future. The information is later cleared by __reset_isolation_suitable().
  222. */
  223. static void update_pageblock_skip(struct compact_control *cc,
  224. struct page *page, unsigned long nr_isolated,
  225. bool migrate_scanner)
  226. {
  227. struct zone *zone = cc->zone;
  228. unsigned long pfn;
  229. if (cc->ignore_skip_hint)
  230. return;
  231. if (!page)
  232. return;
  233. if (nr_isolated)
  234. return;
  235. set_pageblock_skip(page);
  236. pfn = page_to_pfn(page);
  237. /* Update where async and sync compaction should restart */
  238. if (migrate_scanner) {
  239. if (pfn > zone->compact_cached_migrate_pfn[0])
  240. zone->compact_cached_migrate_pfn[0] = pfn;
  241. if (cc->mode != MIGRATE_ASYNC &&
  242. pfn > zone->compact_cached_migrate_pfn[1])
  243. zone->compact_cached_migrate_pfn[1] = pfn;
  244. } else {
  245. if (pfn < zone->compact_cached_free_pfn)
  246. zone->compact_cached_free_pfn = pfn;
  247. }
  248. }
  249. #else
  250. static inline bool isolation_suitable(struct compact_control *cc,
  251. struct page *page)
  252. {
  253. return true;
  254. }
  255. static void update_pageblock_skip(struct compact_control *cc,
  256. struct page *page, unsigned long nr_isolated,
  257. bool migrate_scanner)
  258. {
  259. }
  260. #endif /* CONFIG_COMPACTION */
  261. /*
  262. * Compaction requires the taking of some coarse locks that are potentially
  263. * very heavily contended. For async compaction, back out if the lock cannot
  264. * be taken immediately. For sync compaction, spin on the lock if needed.
  265. *
  266. * Returns true if the lock is held
  267. * Returns false if the lock is not held and compaction should abort
  268. */
  269. static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
  270. struct compact_control *cc)
  271. {
  272. if (cc->mode == MIGRATE_ASYNC) {
  273. if (!spin_trylock_irqsave(lock, *flags)) {
  274. cc->contended = true;
  275. return false;
  276. }
  277. } else {
  278. spin_lock_irqsave(lock, *flags);
  279. }
  280. return true;
  281. }
  282. /*
  283. * Compaction requires the taking of some coarse locks that are potentially
  284. * very heavily contended. The lock should be periodically unlocked to avoid
  285. * having disabled IRQs for a long time, even when there is nobody waiting on
  286. * the lock. It might also be that allowing the IRQs will result in
  287. * need_resched() becoming true. If scheduling is needed, async compaction
  288. * aborts. Sync compaction schedules.
  289. * Either compaction type will also abort if a fatal signal is pending.
  290. * In either case if the lock was locked, it is dropped and not regained.
  291. *
  292. * Returns true if compaction should abort due to fatal signal pending, or
  293. * async compaction due to need_resched()
  294. * Returns false when compaction can continue (sync compaction might have
  295. * scheduled)
  296. */
  297. static bool compact_unlock_should_abort(spinlock_t *lock,
  298. unsigned long flags, bool *locked, struct compact_control *cc)
  299. {
  300. if (*locked) {
  301. spin_unlock_irqrestore(lock, flags);
  302. *locked = false;
  303. }
  304. if (fatal_signal_pending(current)) {
  305. cc->contended = true;
  306. return true;
  307. }
  308. if (need_resched()) {
  309. if (cc->mode == MIGRATE_ASYNC) {
  310. cc->contended = true;
  311. return true;
  312. }
  313. cond_resched();
  314. }
  315. return false;
  316. }
  317. /*
  318. * Aside from avoiding lock contention, compaction also periodically checks
  319. * need_resched() and either schedules in sync compaction or aborts async
  320. * compaction. This is similar to what compact_unlock_should_abort() does, but
  321. * is used where no lock is concerned.
  322. *
  323. * Returns false when no scheduling was needed, or sync compaction scheduled.
  324. * Returns true when async compaction should abort.
  325. */
  326. static inline bool compact_should_abort(struct compact_control *cc)
  327. {
  328. /* async compaction aborts if contended */
  329. if (need_resched()) {
  330. if (cc->mode == MIGRATE_ASYNC) {
  331. cc->contended = true;
  332. return true;
  333. }
  334. cond_resched();
  335. }
  336. return false;
  337. }
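/*
 * Taken together, compact_trylock_irqsave(), compact_unlock_should_abort()
 * and compact_should_abort() implement the scanners' locking discipline:
 * take the coarse zone/LRU lock as late as possible (trylock only for
 * MIGRATE_ASYNC), drop it every SWAP_CLUSTER_MAX pages to let IRQs in, and
 * abort async compaction as soon as need_resched() or a fatal signal makes
 * further scanning counterproductive. The scan loops below follow this
 * pattern.
 */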
  338. /*
  339. * Isolate free pages onto a private freelist. If @strict is true, abort and
  340. * return 0 on any invalid PFN or non-free page inside the pageblock
  341. * (even though some pages may still end up isolated).
  342. */
  343. static unsigned long isolate_freepages_block(struct compact_control *cc,
  344. unsigned long *start_pfn,
  345. unsigned long end_pfn,
  346. struct list_head *freelist,
  347. bool strict)
  348. {
  349. int nr_scanned = 0, total_isolated = 0;
  350. struct page *cursor, *valid_page = NULL;
  351. unsigned long flags = 0;
  352. bool locked = false;
  353. unsigned long blockpfn = *start_pfn;
  354. unsigned int order;
  355. cursor = pfn_to_page(blockpfn);
  356. /* Isolate free pages. */
  357. for (; blockpfn < end_pfn; blockpfn++, cursor++) {
  358. int isolated;
  359. struct page *page = cursor;
  360. /*
  361. * Periodically drop the lock (if held) regardless of its
  362. * contention, to give chance to IRQs. Abort if fatal signal
  363. * pending or async compaction detects need_resched()
  364. */
  365. if (!(blockpfn % SWAP_CLUSTER_MAX)
  366. && compact_unlock_should_abort(&cc->zone->lock, flags,
  367. &locked, cc))
  368. break;
  369. nr_scanned++;
  370. if (!pfn_valid_within(blockpfn))
  371. goto isolate_fail;
  372. if (!valid_page)
  373. valid_page = page;
  374. /*
  375. * For compound pages such as THP and hugetlbfs, we can save
  376. * potentially a lot of iterations if we skip them at once.
  377. * The check is racy, but we can consider only valid values
  378. * and the only danger is skipping too much.
  379. */
  380. if (PageCompound(page)) {
  381. unsigned int comp_order = compound_order(page);
  382. if (likely(comp_order < MAX_ORDER)) {
  383. blockpfn += (1UL << comp_order) - 1;
  384. cursor += (1UL << comp_order) - 1;
  385. }
  386. goto isolate_fail;
  387. }
  388. if (!PageBuddy(page))
  389. goto isolate_fail;
  390. /*
  391. * If we already hold the lock, we can skip some rechecking.
  392. * Note that if we hold the lock now, checked_pageblock was
  393. * already set in some previous iteration (or strict is true),
  394. * so it is correct to skip the suitable migration target
  395. * recheck as well.
  396. */
  397. if (!locked) {
  398. /*
  399. * The zone lock must be held to isolate freepages.
  400. * Unfortunately this is a very coarse lock and can be
  401. * heavily contended if there are parallel allocations
  402. * or parallel compactions. For async compaction do not
  403. * spin on the lock and we acquire the lock as late as
  404. * possible.
  405. */
  406. locked = compact_trylock_irqsave(&cc->zone->lock,
  407. &flags, cc);
  408. if (!locked)
  409. break;
  410. /* Recheck this is a buddy page under lock */
  411. if (!PageBuddy(page))
  412. goto isolate_fail;
  413. }
  414. /* Found a free page, will break it into order-0 pages */
  415. order = page_order(page);
  416. isolated = __isolate_free_page(page, order);
  417. if (!isolated)
  418. break;
  419. set_page_private(page, order);
  420. total_isolated += isolated;
  421. cc->nr_freepages += isolated;
  422. list_add_tail(&page->lru, freelist);
  423. if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
  424. blockpfn += isolated;
  425. break;
  426. }
  427. /* Advance to the end of split page */
  428. blockpfn += isolated - 1;
  429. cursor += isolated - 1;
  430. continue;
  431. isolate_fail:
  432. if (strict)
  433. break;
  434. else
  435. continue;
  436. }
  437. if (locked)
  438. spin_unlock_irqrestore(&cc->zone->lock, flags);
  439. /*
  440. * There is a tiny chance that we have read bogus compound_order(),
  441. * so be careful to not go outside of the pageblock.
  442. */
  443. if (unlikely(blockpfn > end_pfn))
  444. blockpfn = end_pfn;
  445. trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
  446. nr_scanned, total_isolated);
  447. /* Record how far we have got within the block */
  448. *start_pfn = blockpfn;
  449. /*
  450. * If strict isolation is requested by CMA then check that all the
  451. * pages requested were isolated. If there were any failures, 0 is
  452. * returned and CMA will fail.
  453. */
  454. if (strict && blockpfn < end_pfn)
  455. total_isolated = 0;
  456. /* Update the pageblock-skip if the whole pageblock was scanned */
  457. if (blockpfn == end_pfn)
  458. update_pageblock_skip(cc, valid_page, total_isolated, false);
  459. cc->total_free_scanned += nr_scanned;
  460. if (total_isolated)
  461. count_compact_events(COMPACTISOLATED, total_isolated);
  462. return total_isolated;
  463. }
  464. /**
  465. * isolate_freepages_range() - isolate free pages.
  466. * @start_pfn: The first PFN to start isolating.
  467. * @end_pfn: The one-past-last PFN.
  468. *
  469. * Non-free pages, invalid PFNs, or zone boundaries within the
  470. * [start_pfn, end_pfn) range are considered errors and cause the function
  471. * to undo its actions and return zero.
  472. *
  473. * Otherwise, the function returns the one-past-the-last PFN of the
  474. * isolated pages (which may be greater than end_pfn if the end fell in
  475. * the middle of a free page).
  476. */
  477. unsigned long
  478. isolate_freepages_range(struct compact_control *cc,
  479. unsigned long start_pfn, unsigned long end_pfn)
  480. {
  481. unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
  482. LIST_HEAD(freelist);
  483. pfn = start_pfn;
  484. block_start_pfn = pageblock_start_pfn(pfn);
  485. if (block_start_pfn < cc->zone->zone_start_pfn)
  486. block_start_pfn = cc->zone->zone_start_pfn;
  487. block_end_pfn = pageblock_end_pfn(pfn);
  488. for (; pfn < end_pfn; pfn += isolated,
  489. block_start_pfn = block_end_pfn,
  490. block_end_pfn += pageblock_nr_pages) {
  491. /* Protect pfn from changing by isolate_freepages_block */
  492. unsigned long isolate_start_pfn = pfn;
  493. block_end_pfn = min(block_end_pfn, end_pfn);
  494. /*
  495. * pfn could pass block_end_pfn if the isolated freepage
  496. * is larger than a pageblock. In this case, adjust the
  497. * scanning range to the correct block.
  498. */
  499. if (pfn >= block_end_pfn) {
  500. block_start_pfn = pageblock_start_pfn(pfn);
  501. block_end_pfn = pageblock_end_pfn(pfn);
  502. block_end_pfn = min(block_end_pfn, end_pfn);
  503. }
  504. if (!pageblock_pfn_to_page(block_start_pfn,
  505. block_end_pfn, cc->zone))
  506. break;
  507. isolated = isolate_freepages_block(cc, &isolate_start_pfn,
  508. block_end_pfn, &freelist, true);
  509. /*
  510. * In strict mode, isolate_freepages_block() returns 0 if
  511. * there are any holes in the block (ie. invalid PFNs or
  512. * non-free pages).
  513. */
  514. if (!isolated)
  515. break;
  516. /*
  517. * If we managed to isolate pages, it is always (1 << n) *
  518. * pageblock_nr_pages for some non-negative n. (Max order
  519. * page may span two pageblocks).
  520. */
  521. }
  522. /* __isolate_free_page() does not map the pages */
  523. map_pages(&freelist);
  524. if (pfn < end_pfn) {
  525. /* Loop terminated early, cleanup. */
  526. release_freepages(&freelist);
  527. return 0;
  528. }
  529. /* We don't use freelists for anything. */
  530. return pfn;
  531. }
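/*
 * In strict mode this is the freepage-isolation step of contiguous range
 * allocation (e.g. alloc_contig_range() for CMA), which is why a single hole
 * in the requested range causes the whole isolation to be undone.
 */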
  532. /* Similar to reclaim, but different enough that they don't share logic */
  533. static bool too_many_isolated(struct zone *zone)
  534. {
  535. unsigned long active, inactive, isolated;
  536. inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
  537. node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
  538. active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
  539. node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
  540. isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
  541. node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
  542. return isolated > (inactive + active) / 2;
  543. }
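/*
 * The threshold above throttles compaction once the number of isolated pages
 * exceeds half the number of pages still on the node's active and inactive
 * lists. For example, with 1000 active and 1000 inactive pages, isolation
 * pauses once NR_ISOLATED_* exceeds 1000.
 */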
  544. /**
  545. * isolate_migratepages_block() - isolate all migrate-able pages within
  546. * a single pageblock
  547. * @cc: Compaction control structure.
  548. * @low_pfn: The first PFN to isolate
  549. * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
  550. * @isolate_mode: Isolation mode to be used.
  551. *
  552. * Isolate all pages that can be migrated from the range specified by
  553. * [low_pfn, end_pfn). The range is expected to be within same pageblock.
  554. * Returns zero if there is a fatal signal pending, otherwise the PFN of the
  555. * first page that was not scanned (which may be less than, equal to, or
  556. * greater than end_pfn).
  557. *
  558. * The pages are isolated on cc->migratepages list (not required to be empty),
  559. * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
  560. * is neither read nor updated.
  561. */
  562. static unsigned long
  563. isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
  564. unsigned long end_pfn, isolate_mode_t isolate_mode)
  565. {
  566. struct zone *zone = cc->zone;
  567. unsigned long nr_scanned = 0, nr_isolated = 0;
  568. struct lruvec *lruvec;
  569. unsigned long flags = 0;
  570. bool locked = false;
  571. struct page *page = NULL, *valid_page = NULL;
  572. unsigned long start_pfn = low_pfn;
  573. bool skip_on_failure = false;
  574. unsigned long next_skip_pfn = 0;
  575. /*
  576. * Ensure that there are not too many pages isolated from the LRU
  577. * list by either parallel reclaimers or compaction. If there are,
  578. * delay for some time until fewer pages are isolated
  579. */
  580. while (unlikely(too_many_isolated(zone))) {
  581. /* async migration should just abort */
  582. if (cc->mode == MIGRATE_ASYNC)
  583. return 0;
  584. congestion_wait(BLK_RW_ASYNC, HZ/10);
  585. if (fatal_signal_pending(current))
  586. return 0;
  587. }
  588. if (compact_should_abort(cc))
  589. return 0;
  590. if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
  591. skip_on_failure = true;
  592. next_skip_pfn = block_end_pfn(low_pfn, cc->order);
  593. }
  594. /* Time to isolate some pages for migration */
  595. for (; low_pfn < end_pfn; low_pfn++) {
  596. if (skip_on_failure && low_pfn >= next_skip_pfn) {
  597. /*
  598. * We have isolated all migration candidates in the
  599. * previous order-aligned block, and did not skip it due
  600. * to failure. We should migrate the pages now and
  601. * hopefully succeed compaction.
  602. */
  603. if (nr_isolated)
  604. break;
  605. /*
  606. * We failed to isolate in the previous order-aligned
  607. * block. Set the new boundary to the end of the
  608. * current block. Note we can't simply increase
  609. * next_skip_pfn by 1 << order, as low_pfn might have
  610. * been incremented by a higher number due to skipping
  611. * a compound or a high-order buddy page in the
  612. * previous loop iteration.
  613. */
  614. next_skip_pfn = block_end_pfn(low_pfn, cc->order);
  615. }
  616. /*
  617. * Periodically drop the lock (if held) regardless of its
  618. * contention, to give chance to IRQs. Abort async compaction
  619. * if contended.
  620. */
  621. if (!(low_pfn % SWAP_CLUSTER_MAX)
  622. && compact_unlock_should_abort(zone_lru_lock(zone), flags,
  623. &locked, cc))
  624. break;
  625. if (!pfn_valid_within(low_pfn))
  626. goto isolate_fail;
  627. nr_scanned++;
  628. page = pfn_to_page(low_pfn);
  629. if (!valid_page)
  630. valid_page = page;
  631. /*
  632. * Skip if free. We read page order here without zone lock
  633. * which is generally unsafe, but the race window is small and
  634. * the worst thing that can happen is that we skip some
  635. * potential isolation targets.
  636. */
  637. if (PageBuddy(page)) {
  638. unsigned long freepage_order = page_order_unsafe(page);
  639. /*
  640. * Without lock, we cannot be sure that what we got is
  641. * a valid page order. Consider only values in the
  642. * valid order range to prevent low_pfn overflow.
  643. */
  644. if (freepage_order > 0 && freepage_order < MAX_ORDER)
  645. low_pfn += (1UL << freepage_order) - 1;
  646. continue;
  647. }
  648. /*
  649. * Regardless of being on LRU, compound pages such as THP and
  650. * hugetlbfs are not to be compacted. We can potentially save
  651. * a lot of iterations if we skip them at once. The check is
  652. * racy, but we can consider only valid values and the only
  653. * danger is skipping too much.
  654. */
  655. if (PageCompound(page)) {
  656. unsigned int comp_order = compound_order(page);
  657. if (likely(comp_order < MAX_ORDER))
  658. low_pfn += (1UL << comp_order) - 1;
  659. goto isolate_fail;
  660. }
  661. /*
  662. * Check may be lockless but that's ok as we recheck later.
  663. * It's possible to migrate LRU and non-lru movable pages.
  664. * Skip any other type of page
  665. */
  666. if (!PageLRU(page)) {
  667. /*
  668. * __PageMovable can return false positive so we need
  669. * to verify it under page_lock.
  670. */
  671. if (unlikely(__PageMovable(page)) &&
  672. !PageIsolated(page)) {
  673. if (locked) {
  674. spin_unlock_irqrestore(zone_lru_lock(zone),
  675. flags);
  676. locked = false;
  677. }
  678. if (!isolate_movable_page(page, isolate_mode))
  679. goto isolate_success;
  680. }
  681. goto isolate_fail;
  682. }
  683. /*
  684. * Migration will fail if an anonymous page is pinned in memory,
  685. * so avoid taking lru_lock and isolating it unnecessarily in an
  686. * admittedly racy check.
  687. */
  688. if (!page_mapping(page) &&
  689. page_count(page) > page_mapcount(page))
  690. goto isolate_fail;
  691. /*
  692. * Only allow to migrate anonymous pages in GFP_NOFS context
  693. * because those do not depend on fs locks.
  694. */
  695. if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
  696. goto isolate_fail;
  697. /* If we already hold the lock, we can skip some rechecking */
  698. if (!locked) {
  699. locked = compact_trylock_irqsave(zone_lru_lock(zone),
  700. &flags, cc);
  701. if (!locked)
  702. break;
  703. /* Recheck PageLRU and PageCompound under lock */
  704. if (!PageLRU(page))
  705. goto isolate_fail;
  706. /*
  707. * The page became compound since the non-locked check,
  708. * and it's on LRU. It can only be a THP so the order
  709. * is safe to read and it's 0 for tail pages.
  710. */
  711. if (unlikely(PageCompound(page))) {
  712. low_pfn += (1UL << compound_order(page)) - 1;
  713. goto isolate_fail;
  714. }
  715. }
  716. lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
  717. /* Try isolate the page */
  718. if (__isolate_lru_page(page, isolate_mode) != 0)
  719. goto isolate_fail;
  720. VM_BUG_ON_PAGE(PageCompound(page), page);
  721. /* Successfully isolated */
  722. del_page_from_lru_list(page, lruvec, page_lru(page));
  723. inc_node_page_state(page,
  724. NR_ISOLATED_ANON + page_is_file_cache(page));
  725. isolate_success:
  726. list_add(&page->lru, &cc->migratepages);
  727. cc->nr_migratepages++;
  728. nr_isolated++;
  729. /*
  730. * Record where we could have freed pages by migration and not
  731. * yet flushed them to the buddy allocator.
  732. * - this is the lowest page that was isolated and is likely to
  733. * then be freed by migration.
  734. */
  735. if (!cc->last_migrated_pfn)
  736. cc->last_migrated_pfn = low_pfn;
  737. /* Avoid isolating too much */
  738. if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
  739. ++low_pfn;
  740. break;
  741. }
  742. continue;
  743. isolate_fail:
  744. if (!skip_on_failure)
  745. continue;
  746. /*
  747. * We have isolated some pages, but then failed. Release them
  748. * instead of migrating, as we cannot form the cc->order buddy
  749. * page anyway.
  750. */
  751. if (nr_isolated) {
  752. if (locked) {
  753. spin_unlock_irqrestore(zone_lru_lock(zone), flags);
  754. locked = false;
  755. }
  756. putback_movable_pages(&cc->migratepages);
  757. cc->nr_migratepages = 0;
  758. cc->last_migrated_pfn = 0;
  759. nr_isolated = 0;
  760. }
  761. if (low_pfn < next_skip_pfn) {
  762. low_pfn = next_skip_pfn - 1;
  763. /*
  764. * The check near the loop beginning would have updated
  765. * next_skip_pfn too, but this is a bit simpler.
  766. */
  767. next_skip_pfn += 1UL << cc->order;
  768. }
  769. }
  770. /*
  771. * The PageBuddy() check could have potentially brought us outside
  772. * the range to be scanned.
  773. */
  774. if (unlikely(low_pfn > end_pfn))
  775. low_pfn = end_pfn;
  776. if (locked)
  777. spin_unlock_irqrestore(zone_lru_lock(zone), flags);
  778. /*
  779. * Update the pageblock-skip information and cached scanner pfn,
  780. * if the whole pageblock was scanned without isolating any page.
  781. */
  782. if (low_pfn == end_pfn)
  783. update_pageblock_skip(cc, valid_page, nr_isolated, true);
  784. trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
  785. nr_scanned, nr_isolated);
  786. cc->total_migrate_scanned += nr_scanned;
  787. if (nr_isolated)
  788. count_compact_events(COMPACTISOLATED, nr_isolated);
  789. return low_pfn;
  790. }
  791. /**
  792. * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
  793. * @cc: Compaction control structure.
  794. * @start_pfn: The first PFN to start isolating.
  795. * @end_pfn: The one-past-last PFN.
  796. *
  797. * Returns zero if isolation fails fatally due to e.g. pending signal.
  798. * Otherwise, function returns one-past-the-last PFN of isolated page
  799. * (which may be greater than end_pfn if end fell in a middle of a THP page).
  800. */
  801. unsigned long
  802. isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
  803. unsigned long end_pfn)
  804. {
  805. unsigned long pfn, block_start_pfn, block_end_pfn;
  806. /* Scan block by block. First and last block may be incomplete */
  807. pfn = start_pfn;
  808. block_start_pfn = pageblock_start_pfn(pfn);
  809. if (block_start_pfn < cc->zone->zone_start_pfn)
  810. block_start_pfn = cc->zone->zone_start_pfn;
  811. block_end_pfn = pageblock_end_pfn(pfn);
  812. for (; pfn < end_pfn; pfn = block_end_pfn,
  813. block_start_pfn = block_end_pfn,
  814. block_end_pfn += pageblock_nr_pages) {
  815. block_end_pfn = min(block_end_pfn, end_pfn);
  816. if (!pageblock_pfn_to_page(block_start_pfn,
  817. block_end_pfn, cc->zone))
  818. continue;
  819. pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
  820. ISOLATE_UNEVICTABLE);
  821. if (!pfn)
  822. break;
  823. if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
  824. break;
  825. }
  826. return pfn;
  827. }
  828. #endif /* CONFIG_COMPACTION || CONFIG_CMA */
  829. #ifdef CONFIG_COMPACTION
  830. /* Returns true if the page is within a block suitable for migration to */
  831. static bool suitable_migration_target(struct compact_control *cc,
  832. struct page *page)
  833. {
  834. if (cc->ignore_block_suitable)
  835. return true;
  836. /* If the page is a large free page, then disallow migration */
  837. if (PageBuddy(page)) {
  838. /*
  839. * We are checking page_order without zone->lock taken. But
  840. * the only small danger is that we skip a potentially suitable
  841. * pageblock, so it's not worth to check order for valid range.
  842. */
  843. if (page_order_unsafe(page) >= pageblock_order)
  844. return false;
  845. }
  846. /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
  847. if (migrate_async_suitable(get_pageblock_migratetype(page)))
  848. return true;
  849. /* Otherwise skip the block */
  850. return false;
  851. }
  852. /*
  853. * Test whether the free scanner has reached the same or lower pageblock than
  854. * the migration scanner, and compaction should thus terminate.
  855. */
  856. static inline bool compact_scanners_met(struct compact_control *cc)
  857. {
  858. return (cc->free_pfn >> pageblock_order)
  859. <= (cc->migrate_pfn >> pageblock_order);
  860. }
  861. /*
  862. * Based on information in the current compact_control, find blocks
  863. * suitable for isolating free pages from and then isolate them.
  864. */
  865. static void isolate_freepages(struct compact_control *cc)
  866. {
  867. struct zone *zone = cc->zone;
  868. struct page *page;
  869. unsigned long block_start_pfn; /* start of current pageblock */
  870. unsigned long isolate_start_pfn; /* exact pfn we start at */
  871. unsigned long block_end_pfn; /* end of current pageblock */
  872. unsigned long low_pfn; /* lowest pfn scanner is able to scan */
  873. struct list_head *freelist = &cc->freepages;
  874. /*
  875. * Initialise the free scanner. The starting point is where we last
  876. * successfully isolated from, zone-cached value, or the end of the
  877. * zone when isolating for the first time. For looping we also need
  878. * this pfn aligned down to the pageblock boundary, because we do
  879. * block_start_pfn -= pageblock_nr_pages in the for loop.
  880. * For the ending point, take care when isolating in the last pageblock of
  881. * a zone which ends in the middle of a pageblock.
  882. * The low boundary is the end of the pageblock the migration scanner
  883. * is using.
  884. */
  885. isolate_start_pfn = cc->free_pfn;
  886. block_start_pfn = pageblock_start_pfn(cc->free_pfn);
  887. block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
  888. zone_end_pfn(zone));
  889. low_pfn = pageblock_end_pfn(cc->migrate_pfn);
  890. /*
  891. * Isolate free pages until enough are available to migrate the
  892. * pages on cc->migratepages. We stop searching if the migrate
  893. * and free page scanners meet or enough free pages are isolated.
  894. */
  895. for (; block_start_pfn >= low_pfn;
  896. block_end_pfn = block_start_pfn,
  897. block_start_pfn -= pageblock_nr_pages,
  898. isolate_start_pfn = block_start_pfn) {
  899. /*
  900. * This can iterate a massively long zone without finding any
  901. * suitable migration targets, so periodically check if we need
  902. * to schedule, or even abort async compaction.
  903. */
  904. if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
  905. && compact_should_abort(cc))
  906. break;
  907. page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
  908. zone);
  909. if (!page)
  910. continue;
  911. /* Check the block is suitable for migration */
  912. if (!suitable_migration_target(cc, page))
  913. continue;
  914. /* If isolation recently failed, do not retry */
  915. if (!isolation_suitable(cc, page))
  916. continue;
  917. /* Found a block suitable for isolating free pages from. */
  918. isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
  919. freelist, false);
  920. /*
  921. * If we isolated enough freepages, or aborted due to lock
  922. * contention, terminate.
  923. */
  924. if ((cc->nr_freepages >= cc->nr_migratepages)
  925. || cc->contended) {
  926. if (isolate_start_pfn >= block_end_pfn) {
  927. /*
  928. * Restart at previous pageblock if more
  929. * freepages can be isolated next time.
  930. */
  931. isolate_start_pfn =
  932. block_start_pfn - pageblock_nr_pages;
  933. }
  934. break;
  935. } else if (isolate_start_pfn < block_end_pfn) {
  936. /*
  937. * If isolation failed early, do not continue
  938. * needlessly.
  939. */
  940. break;
  941. }
  942. }
  943. /* __isolate_free_page() does not map the pages */
  944. map_pages(freelist);
  945. /*
  946. * Record where the free scanner will restart next time. Either we
  947. * broke from the loop and set isolate_start_pfn based on the last
  948. * call to isolate_freepages_block(), or we met the migration scanner
  949. * and the loop terminated due to isolate_start_pfn < low_pfn
  950. */
  951. cc->free_pfn = isolate_start_pfn;
  952. }
  953. /*
  954. * This is a migrate-callback that "allocates" freepages by taking pages
  955. * from the isolated freelists in the block we are migrating to.
  956. */
  957. static struct page *compaction_alloc(struct page *migratepage,
  958. unsigned long data,
  959. int **result)
  960. {
  961. struct compact_control *cc = (struct compact_control *)data;
  962. struct page *freepage;
  963. /*
  964. * Isolate free pages if necessary, and if we are not aborting due to
  965. * contention.
  966. */
  967. if (list_empty(&cc->freepages)) {
  968. if (!cc->contended)
  969. isolate_freepages(cc);
  970. if (list_empty(&cc->freepages))
  971. return NULL;
  972. }
  973. freepage = list_entry(cc->freepages.next, struct page, lru);
  974. list_del(&freepage->lru);
  975. cc->nr_freepages--;
  976. return freepage;
  977. }
  978. /*
  979. * This is a migrate-callback that "frees" freepages back to the isolated
  980. * freelist. All pages on the freelist are from the same zone, so there is no
  981. * special handling needed for NUMA.
  982. */
  983. static void compaction_free(struct page *page, unsigned long data)
  984. {
  985. struct compact_control *cc = (struct compact_control *)data;
  986. list_add(&page->lru, &cc->freepages);
  987. cc->nr_freepages++;
  988. }
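/*
 * compaction_alloc() and compaction_free() are the allocation and release
 * callbacks handed to migrate_pages() from compact_zone() below, so every
 * successfully migrated page lands in a target page taken from cc->freepages.
 */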
  989. /* possible outcome of isolate_migratepages */
  990. typedef enum {
  991. ISOLATE_ABORT, /* Abort compaction now */
  992. ISOLATE_NONE, /* No pages isolated, continue scanning */
  993. ISOLATE_SUCCESS, /* Pages isolated, migrate */
  994. } isolate_migrate_t;
  995. /*
  996. * Allow userspace to control policy on scanning the unevictable LRU for
  997. * compactable pages.
  998. */
  999. int sysctl_compact_unevictable_allowed __read_mostly = 1;
  1000. /*
  1001. * Isolate all pages that can be migrated from the first suitable block,
  1002. * starting at the block pointed to by the migrate scanner pfn within
  1003. * compact_control.
  1004. */
  1005. static isolate_migrate_t isolate_migratepages(struct zone *zone,
  1006. struct compact_control *cc)
  1007. {
  1008. unsigned long block_start_pfn;
  1009. unsigned long block_end_pfn;
  1010. unsigned long low_pfn;
  1011. struct page *page;
  1012. const isolate_mode_t isolate_mode =
  1013. (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
  1014. (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
  1015. /*
  1016. * Start at where we last stopped, or beginning of the zone as
  1017. * initialized by compact_zone()
  1018. */
  1019. low_pfn = cc->migrate_pfn;
  1020. block_start_pfn = pageblock_start_pfn(low_pfn);
  1021. if (block_start_pfn < zone->zone_start_pfn)
  1022. block_start_pfn = zone->zone_start_pfn;
  1023. /* Only scan within a pageblock boundary */
  1024. block_end_pfn = pageblock_end_pfn(low_pfn);
  1025. /*
  1026. * Iterate over whole pageblocks until we find the first suitable.
  1027. * Do not cross the free scanner.
  1028. */
  1029. for (; block_end_pfn <= cc->free_pfn;
  1030. low_pfn = block_end_pfn,
  1031. block_start_pfn = block_end_pfn,
  1032. block_end_pfn += pageblock_nr_pages) {
  1033. /*
  1034. * This can potentially iterate a massively long zone with
  1035. * many pageblocks unsuitable, so periodically check if we
  1036. * need to schedule, or even abort async compaction.
  1037. */
  1038. if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
  1039. && compact_should_abort(cc))
  1040. break;
  1041. page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
  1042. zone);
  1043. if (!page)
  1044. continue;
  1045. /* If isolation recently failed, do not retry */
  1046. if (!isolation_suitable(cc, page))
  1047. continue;
  1048. /*
  1049. * For async compaction, also only scan in MOVABLE blocks.
  1050. * Async compaction is optimistic to see if the minimum amount
  1051. * of work satisfies the allocation.
  1052. */
  1053. if (cc->mode == MIGRATE_ASYNC &&
  1054. !migrate_async_suitable(get_pageblock_migratetype(page)))
  1055. continue;
  1056. /* Perform the isolation */
  1057. low_pfn = isolate_migratepages_block(cc, low_pfn,
  1058. block_end_pfn, isolate_mode);
  1059. if (!low_pfn || cc->contended)
  1060. return ISOLATE_ABORT;
  1061. /*
  1062. * Either we isolated something and proceed with migration. Or
  1063. * we failed and compact_zone should decide if we should
  1064. * continue or not.
  1065. */
  1066. break;
  1067. }
  1068. /* Record where migration scanner will be restarted. */
  1069. cc->migrate_pfn = low_pfn;
  1070. return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
  1071. }
  1072. /*
  1073. * order == -1 is expected when compacting via
  1074. * /proc/sys/vm/compact_memory
  1075. */
  1076. static inline bool is_via_compact_memory(int order)
  1077. {
  1078. return order == -1;
  1079. }
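/*
 * In practice order == -1 comes from the sysctl path, e.g.
 * "echo 1 > /proc/sys/vm/compact_memory", which asks for full compaction of
 * all zones regardless of watermarks or fragmentation heuristics.
 */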
  1080. static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
  1081. const int migratetype)
  1082. {
  1083. unsigned int order;
  1084. unsigned long watermark;
  1085. if (cc->contended || fatal_signal_pending(current))
  1086. return COMPACT_CONTENDED;
  1087. /* Compaction run completes if the migrate and free scanner meet */
  1088. if (compact_scanners_met(cc)) {
  1089. /* Let the next compaction start anew. */
  1090. reset_cached_positions(zone);
  1091. /*
  1092. * Mark that the PG_migrate_skip information should be cleared
  1093. * by kswapd when it goes to sleep. kcompactd does not set the
  1094. * flag itself as the decision to be clear should be directly
  1095. * based on an allocation request.
  1096. */
  1097. if (cc->direct_compaction)
  1098. zone->compact_blockskip_flush = true;
  1099. if (cc->whole_zone)
  1100. return COMPACT_COMPLETE;
  1101. else
  1102. return COMPACT_PARTIAL_SKIPPED;
  1103. }
  1104. if (is_via_compact_memory(cc->order))
  1105. return COMPACT_CONTINUE;
  1106. /* Compaction run is not finished if the watermark is not met */
  1107. watermark = zone->watermark[cc->alloc_flags & ALLOC_WMARK_MASK];
  1108. if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
  1109. cc->alloc_flags))
  1110. return COMPACT_CONTINUE;
  1111. /* Direct compactor: Is a suitable page free? */
  1112. for (order = cc->order; order < MAX_ORDER; order++) {
  1113. struct free_area *area = &zone->free_area[order];
  1114. bool can_steal;
  1115. /* Job done if page is free of the right migratetype */
  1116. if (!list_empty(&area->free_list[migratetype]))
  1117. return COMPACT_SUCCESS;
  1118. #ifdef CONFIG_CMA
  1119. /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
  1120. if (migratetype == MIGRATE_MOVABLE &&
  1121. !list_empty(&area->free_list[MIGRATE_CMA]))
  1122. return COMPACT_SUCCESS;
  1123. #endif
  1124. /*
  1125. * Job done if allocation would steal freepages from
  1126. * other migratetype buddy lists.
  1127. */
  1128. if (find_suitable_fallback(area, order, migratetype,
  1129. true, &can_steal) != -1)
  1130. return COMPACT_SUCCESS;
  1131. }
  1132. return COMPACT_NO_SUITABLE_PAGE;
  1133. }
  1134. static enum compact_result compact_finished(struct zone *zone,
  1135. struct compact_control *cc,
  1136. const int migratetype)
  1137. {
  1138. int ret;
  1139. ret = __compact_finished(zone, cc, migratetype);
  1140. trace_mm_compaction_finished(zone, cc->order, ret);
  1141. if (ret == COMPACT_NO_SUITABLE_PAGE)
  1142. ret = COMPACT_CONTINUE;
  1143. return ret;
  1144. }
  1145. /*
  1146. * compaction_suitable: Is this suitable to run compaction on this zone now?
  1147. * Returns
  1148. * COMPACT_SKIPPED - If there are too few free pages for compaction
  1149. * COMPACT_SUCCESS - If the allocation would succeed without compaction
  1150. * COMPACT_CONTINUE - If compaction should run now
  1151. */
  1152. static enum compact_result __compaction_suitable(struct zone *zone, int order,
  1153. unsigned int alloc_flags,
  1154. int classzone_idx,
  1155. unsigned long wmark_target)
  1156. {
  1157. unsigned long watermark;
  1158. if (is_via_compact_memory(order))
  1159. return COMPACT_CONTINUE;
  1160. watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
  1161. /*
  1162. * If watermarks for high-order allocation are already met, there
  1163. * should be no need for compaction at all.
  1164. */
  1165. if (zone_watermark_ok(zone, order, watermark, classzone_idx,
  1166. alloc_flags))
  1167. return COMPACT_SUCCESS;
  1168. /*
  1169. * Watermarks for order-0 must be met for compaction to be able to
  1170. * isolate free pages for migration targets. This means that the
  1171. * watermark and alloc_flags have to match, or be more pessimistic than
  1172. * the check in __isolate_free_page(). We don't use the direct
  1173. * compactor's alloc_flags, as they are not relevant for freepage
  1174. * isolation. We however do use the direct compactor's classzone_idx to
  1175. * skip over zones where lowmem reserves would prevent allocation even
  1176. * if compaction succeeds.
  1177. * For costly orders, we require low watermark instead of min for
  1178. * compaction to proceed to increase its chances.
  1179. * ALLOC_CMA is used, as pages in CMA pageblocks are considered
  1180. * suitable migration targets
  1181. */
  1182. watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
  1183. low_wmark_pages(zone) : min_wmark_pages(zone);
  1184. watermark += compact_gap(order);
  1185. if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
  1186. ALLOC_CMA, wmark_target))
  1187. return COMPACT_SKIPPED;
  1188. return COMPACT_CONTINUE;
  1189. }
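/*
 * A worked example of the order-0 check above, assuming compact_gap(order)
 * evaluates to (2UL << order) (its definition in internal.h in this kernel
 * series) and 4 KB pages. For an order-9 request, which is costly, the low
 * watermark is used:
 *
 *   watermark = low_wmark_pages(zone) + (2UL << 9)
 *             = low_wmark_pages(zone) + 1024 pages (4 MB)
 *
 * so compaction only proceeds when the zone can spare roughly twice the
 * requested size in order-0 pages for the free scanner to work with.
 */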
  1190. enum compact_result compaction_suitable(struct zone *zone, int order,
  1191. unsigned int alloc_flags,
  1192. int classzone_idx)
  1193. {
  1194. enum compact_result ret;
  1195. int fragindex;
  1196. ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
  1197. zone_page_state(zone, NR_FREE_PAGES));
  1198. /*
  1199. * fragmentation index determines if allocation failures are due to
  1200. * low memory or external fragmentation
  1201. *
  1202. * index of -1000 would imply allocations might succeed depending on
  1203. * watermarks, but we already failed the high-order watermark check
  1204. * index towards 0 implies failure is due to lack of memory
  1205. * index towards 1000 implies failure is due to fragmentation
  1206. *
  1207. * Only compact if a failure would be due to fragmentation. Also
  1208. * ignore fragindex for non-costly orders where the alternative to
  1209. * a successful reclaim/compaction is OOM. Fragindex and the
  1210. * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
  1211. * excessive compaction for costly orders, but it should not be at the
  1212. * expense of system stability.
  1213. */
  1214. if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
  1215. fragindex = fragmentation_index(zone, order);
  1216. if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
  1217. ret = COMPACT_NOT_SUITABLE_ZONE;
  1218. }
  1219. trace_mm_compaction_suitable(zone, order, ret);
  1220. if (ret == COMPACT_NOT_SUITABLE_ZONE)
  1221. ret = COMPACT_SKIPPED;
  1222. return ret;
  1223. }
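/*
 * sysctl_extfrag_threshold (vm.extfrag_threshold) defaults to 500 later in
 * this file. For a costly order, a fragmentation index of 300 (failure mostly
 * due to lack of memory) therefore turns the result into COMPACT_SKIPPED,
 * while an index of 800 (failure due to fragmentation) leaves
 * COMPACT_CONTINUE in place.
 */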
  1224. bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
  1225. int alloc_flags)
  1226. {
  1227. struct zone *zone;
  1228. struct zoneref *z;
  1229. /*
  1230. * Make sure at least one zone would pass __compaction_suitable if we continue
  1231. * retrying the reclaim.
  1232. */
  1233. for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
  1234. ac->nodemask) {
  1235. unsigned long available;
  1236. enum compact_result compact_result;
  1237. /*
  1238. * Do not consider all the reclaimable memory because we do not
  1239. * want to thrash just for a single high-order allocation which
  1240. * is not even guaranteed to appear even if __compaction_suitable
  1241. * is happy about the watermark check.
  1242. */
  1243. available = zone_reclaimable_pages(zone) / order;
  1244. available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
  1245. compact_result = __compaction_suitable(zone, order, alloc_flags,
  1246. ac_classzone_idx(ac), available);
  1247. if (compact_result != COMPACT_SKIPPED)
  1248. return true;
  1249. }
  1250. return false;
  1251. }
  1252. static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
  1253. {
  1254. enum compact_result ret;
  1255. unsigned long start_pfn = zone->zone_start_pfn;
  1256. unsigned long end_pfn = zone_end_pfn(zone);
  1257. const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
  1258. const bool sync = cc->mode != MIGRATE_ASYNC;
  1259. ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
  1260. cc->classzone_idx);
  1261. /* Compaction is likely to fail */
  1262. if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
  1263. return ret;
  1264. /* huh, compaction_suitable is returning something unexpected */
  1265. VM_BUG_ON(ret != COMPACT_CONTINUE);
  1266. /*
  1267. * Clear pageblock skip if there were failures recently and compaction
  1268. * is about to be retried after being deferred.
  1269. */
  1270. if (compaction_restarting(zone, cc->order))
  1271. __reset_isolation_suitable(zone);
	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start (unless we explicitly
	 * want to compact the whole zone), but check that it is initialised
	 * by ensuring the values are within zone boundaries.
	 */
	if (cc->whole_zone) {
		cc->migrate_pfn = start_pfn;
		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
	} else {
		cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
		cc->free_pfn = zone->compact_cached_free_pfn;
		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
			zone->compact_cached_free_pfn = cc->free_pfn;
		}
		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
			cc->migrate_pfn = start_pfn;
			zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
			zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
		}

		if (cc->migrate_pfn == start_pfn)
			cc->whole_zone = true;
	}

	cc->last_migrated_pfn = 0;

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc, migratetype)) ==
						COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_CONTENDED;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * previous cc->order aligned block.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_CONTENDED;
				goto out;
			}
			/*
			 * We failed to migrate at least one page in the current
			 * order-aligned block, so skip the rest of it.
			 */
			if (cc->direct_compaction &&
						(cc->mode == MIGRATE_ASYNC)) {
				cc->migrate_pfn = block_end_pfn(
						cc->migrate_pfn - 1, cc->order);
				/* Draining pcplists is useless in this case */
				cc->last_migrated_pfn = 0;
			}
		}

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
		if (cc->order > 0 && cc->last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				block_start_pfn(cc->migrate_pfn, cc->order);

			if (cc->last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(zone);
				put_cpu();
				/* No more flushing until we migrate again */
				cc->last_migrated_pfn = 0;
			}
		}
	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn = pageblock_start_pfn(free_pfn);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = free_pfn;
	}

	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	return ret;
}
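
/*
 * Set up a compact_control for one direct compaction attempt on a zone and
 * run compact_zone() with it. The lowest priority (MIN_COMPACT_PRIORITY)
 * compacts the whole zone and ignores the pageblock skip hints and
 * suitability checks.
 */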
static enum compact_result compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum compact_priority prio,
		unsigned int alloc_flags, int classzone_idx)
{
	enum compact_result ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.total_migrate_scanned = 0,
		.total_free_scanned = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = (prio == COMPACT_PRIO_ASYNC) ?
					MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
		.direct_compaction = true,
		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @prio: Determines how hard direct compaction should try to succeed
 *
 * This is the main entry point for direct page compaction.
 */
enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio)
{
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	enum compact_result rc = COMPACT_SKIPPED;

	/*
	 * Check if the GFP flags allow compaction - GFP_NOIO is really
	 * tricky context because the migration might require IO
	 */
	if (!may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		enum compact_result status;

		if (prio > MIN_COMPACT_PRIORITY
					&& compaction_deferred(zone, order)) {
			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
			continue;
		}

		status = compact_zone_order(zone, order, gfp_mask, prio,
					alloc_flags, ac_classzone_idx(ac));
		rc = max(status, rc);

		/* The allocation should succeed, stop compacting */
		if (status == COMPACT_SUCCESS) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);

			break;
		}

		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
					status == COMPACT_PARTIAL_SKIPPED))
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones
		 */
		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
					|| fatal_signal_pending(current))
			break;
	}

	return rc;
}

/* Compact all zones within a node */
static void compact_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = -1,
		.total_migrate_scanned = 0,
		.total_free_scanned = 0,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.whole_zone = true,
		.gfp_mask = GFP_KERNEL,
	};

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.nr_freepages = 0;
		cc.nr_migratepages = 0;
		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		compact_zone(zone, &cc);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}
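
/* Handler for the vm.extfrag_threshold sysctl */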
int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
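
/* Has kcompactd been asked to do some work, or should the thread stop? */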
static inline bool kcompactd_work_requested(pg_data_t *pgdat)
{
	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
}
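
/*
 * Check whether any zone up to kcompactd_classzone_idx would currently be
 * a suitable compaction target for the pending kcompactd request.
 */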
static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;

	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
		zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
					classzone_idx) == COMPACT_CONTINUE)
			return true;
	}

	return false;
}

static void kcompactd_do_work(pg_data_t *pgdat)
{
	/*
	 * With no special task, compact all zones so that a page of requested
	 * order is allocatable.
	 */
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = pgdat->kcompactd_max_order,
		.total_migrate_scanned = 0,
		.total_free_scanned = 0,
		.classzone_idx = pgdat->kcompactd_classzone_idx,
		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = true,
		.gfp_mask = GFP_KERNEL,
	};

	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
							cc.classzone_idx);
	count_compact_event(KCOMPACTD_WAKE);

	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
		int status;

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		if (compaction_deferred(zone, cc.order))
			continue;

		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
							COMPACT_CONTINUE)
			continue;

		cc.nr_freepages = 0;
		cc.nr_migratepages = 0;
		cc.total_migrate_scanned = 0;
		cc.total_free_scanned = 0;
		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		if (kthread_should_stop())
			return;
		status = compact_zone(zone, &cc);

		if (status == COMPACT_SUCCESS) {
			compaction_defer_reset(zone, cc.order, false);
		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
			/*
			 * We use sync migration mode here, so we defer like
			 * sync direct compaction does.
			 */
			defer_compaction(zone, cc.order);
		}

		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
				     cc.total_migrate_scanned);
		count_compact_events(KCOMPACTD_FREE_SCANNED,
				     cc.total_free_scanned);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	/*
	 * Regardless of success, we are done until woken up next. But remember
	 * the requested order/classzone_idx in case it was higher/tighter than
	 * our current ones
	 */
	if (pgdat->kcompactd_max_order <= cc.order)
		pgdat->kcompactd_max_order = 0;
	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
}
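
/*
 * Record the highest order and lowest classzone_idx requested so far, and
 * wake kcompactd if it is sleeping and at least one zone looks suitable
 * for compaction.
 */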
void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
	if (!order)
		return;

	if (pgdat->kcompactd_max_order < order)
		pgdat->kcompactd_max_order = order;

	/*
	 * Pairs with implicit barrier in wait_event_freezable()
	 * such that wakeups are not missed in the lockless
	 * waitqueue_active() call.
	 */
	smp_acquire__after_ctrl_dep();

	if (pgdat->kcompactd_classzone_idx > classzone_idx)
		pgdat->kcompactd_classzone_idx = classzone_idx;

	if (!waitqueue_active(&pgdat->kcompactd_wait))
		return;

	if (!kcompactd_node_suitable(pgdat))
		return;

	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
							classzone_idx);
	wake_up_interruptible(&pgdat->kcompactd_wait);
}

/*
 * The background compaction daemon, started as a kernel thread
 * from the init process.
 */
static int kcompactd(void *p)
{
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;

	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	set_freezable();

	pgdat->kcompactd_max_order = 0;
	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;

	while (!kthread_should_stop()) {
		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
		wait_event_freezable(pgdat->kcompactd_wait,
				kcompactd_work_requested(pgdat));

		kcompactd_do_work(pgdat);
	}

	return 0;
}

/*
 * This kcompactd start function will be called by init and node-hot-add.
 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kcompactd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kcompactd)
		return 0;

	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
	if (IS_ERR(pgdat->kcompactd)) {
		pr_err("Failed to start kcompactd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kcompactd);
		pgdat->kcompactd = NULL;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold mem_hotplug_begin/end().
 */
void kcompactd_stop(int nid)
{
	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;

	if (kcompactd) {
		kthread_stop(kcompactd);
		NODE_DATA(nid)->kcompactd = NULL;
	}
}

/*
 * It's optimal to keep each kcompactd on the same CPUs as its node's
 * memory, but not required for correctness. So if the last cpu in a node
 * goes away, kcompactd is allowed to run anywhere; when the first cpu of
 * the node comes back, restore its cpu binding.
 */
static int kcompactd_cpu_online(unsigned int cpu)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		pg_data_t *pgdat = NODE_DATA(nid);
		const struct cpumask *mask;

		mask = cpumask_of_node(pgdat->node_id);

		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
			/* One of our CPUs online: restore mask */
			set_cpus_allowed_ptr(pgdat->kcompactd, mask);
	}
	return 0;
}

static int __init kcompactd_init(void)
{
	int nid;
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"mm/compaction:online",
					kcompactd_cpu_online, NULL);
	if (ret < 0) {
		pr_err("kcompactd: failed to register hotplug callbacks.\n");
		return ret;
	}

	for_each_node_state(nid, N_MEMORY)
		kcompactd_run(nid);
	return 0;
}
subsys_initcall(kcompactd_init)

#endif /* CONFIG_COMPACTION */