compaction.c 53 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
6611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960
  1. /*
  2. * linux/mm/compaction.c
  3. *
  4. * Memory compaction for the reduction of external fragmentation. Note that
  5. * this heavily depends upon page migration to do all the real heavy
  6. * lifting
  7. *
  8. * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
  9. */
  10. #include <linux/cpu.h>
  11. #include <linux/swap.h>
  12. #include <linux/migrate.h>
  13. #include <linux/compaction.h>
  14. #include <linux/mm_inline.h>
  15. #include <linux/backing-dev.h>
  16. #include <linux/sysctl.h>
  17. #include <linux/sysfs.h>
  18. #include <linux/balloon_compaction.h>
  19. #include <linux/page-isolation.h>
  20. #include <linux/kasan.h>
  21. #include <linux/kthread.h>
  22. #include <linux/freezer.h>
  23. #include "internal.h"
  24. #ifdef CONFIG_COMPACTION
  25. static inline void count_compact_event(enum vm_event_item item)
  26. {
  27. count_vm_event(item);
  28. }
  29. static inline void count_compact_events(enum vm_event_item item, long delta)
  30. {
  31. count_vm_events(item, delta);
  32. }
  33. #else
  34. #define count_compact_event(item) do { } while (0)
  35. #define count_compact_events(item, delta) do { } while (0)
  36. #endif
  37. #if defined CONFIG_COMPACTION || defined CONFIG_CMA
  38. #define CREATE_TRACE_POINTS
  39. #include <trace/events/compaction.h>
  40. #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
  41. #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
  42. #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
  43. #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
  44. static unsigned long release_freepages(struct list_head *freelist)
  45. {
  46. struct page *page, *next;
  47. unsigned long high_pfn = 0;
  48. list_for_each_entry_safe(page, next, freelist, lru) {
  49. unsigned long pfn = page_to_pfn(page);
  50. list_del(&page->lru);
  51. __free_page(page);
  52. if (pfn > high_pfn)
  53. high_pfn = pfn;
  54. }
  55. return high_pfn;
  56. }
  57. static void map_pages(struct list_head *list)
  58. {
  59. struct page *page;
  60. list_for_each_entry(page, list, lru) {
  61. arch_alloc_page(page, 0);
  62. kernel_map_pages(page, 1, 1);
  63. kasan_alloc_pages(page, 0);
  64. }
  65. }
  66. static inline bool migrate_async_suitable(int migratetype)
  67. {
  68. return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
  69. }
  70. #ifdef CONFIG_COMPACTION
  71. /* Do not skip compaction more than 64 times */
  72. #define COMPACT_MAX_DEFER_SHIFT 6
  73. /*
  74. * Compaction is deferred when compaction fails to result in a page
  75. * allocation success. 1 << compact_defer_limit compactions are skipped up
  76. * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
  77. */
  78. void defer_compaction(struct zone *zone, int order)
  79. {
  80. zone->compact_considered = 0;
  81. zone->compact_defer_shift++;
  82. if (order < zone->compact_order_failed)
  83. zone->compact_order_failed = order;
  84. if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
  85. zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
  86. trace_mm_compaction_defer_compaction(zone, order);
  87. }
  88. /* Returns true if compaction should be skipped this time */
  89. bool compaction_deferred(struct zone *zone, int order)
  90. {
  91. unsigned long defer_limit = 1UL << zone->compact_defer_shift;
  92. if (order < zone->compact_order_failed)
  93. return false;
  94. /* Avoid possible overflow */
  95. if (++zone->compact_considered > defer_limit)
  96. zone->compact_considered = defer_limit;
  97. if (zone->compact_considered >= defer_limit)
  98. return false;
  99. trace_mm_compaction_deferred(zone, order);
  100. return true;
  101. }
  102. /*
  103. * Update defer tracking counters after successful compaction of given order,
  104. * which means an allocation either succeeded (alloc_success == true) or is
  105. * expected to succeed.
  106. */
  107. void compaction_defer_reset(struct zone *zone, int order,
  108. bool alloc_success)
  109. {
  110. if (alloc_success) {
  111. zone->compact_considered = 0;
  112. zone->compact_defer_shift = 0;
  113. }
  114. if (order >= zone->compact_order_failed)
  115. zone->compact_order_failed = order + 1;
  116. trace_mm_compaction_defer_reset(zone, order);
  117. }
  118. /* Returns true if restarting compaction after many failures */
  119. bool compaction_restarting(struct zone *zone, int order)
  120. {
  121. if (order < zone->compact_order_failed)
  122. return false;
  123. return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
  124. zone->compact_considered >= 1UL << zone->compact_defer_shift;
  125. }
  126. /* Returns true if the pageblock should be scanned for pages to isolate. */
  127. static inline bool isolation_suitable(struct compact_control *cc,
  128. struct page *page)
  129. {
  130. if (cc->ignore_skip_hint)
  131. return true;
  132. return !get_pageblock_skip(page);
  133. }
  134. static void reset_cached_positions(struct zone *zone)
  135. {
  136. zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
  137. zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
  138. zone->compact_cached_free_pfn =
  139. pageblock_start_pfn(zone_end_pfn(zone) - 1);
  140. }
  141. /*
  142. * This function is called to clear all cached information on pageblocks that
  143. * should be skipped for page isolation when the migrate and free page scanner
  144. * meet.
  145. */
  146. static void __reset_isolation_suitable(struct zone *zone)
  147. {
  148. unsigned long start_pfn = zone->zone_start_pfn;
  149. unsigned long end_pfn = zone_end_pfn(zone);
  150. unsigned long pfn;
  151. zone->compact_blockskip_flush = false;
  152. /* Walk the zone and mark every pageblock as suitable for isolation */
  153. for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  154. struct page *page;
  155. cond_resched();
  156. if (!pfn_valid(pfn))
  157. continue;
  158. page = pfn_to_page(pfn);
  159. if (zone != page_zone(page))
  160. continue;
  161. clear_pageblock_skip(page);
  162. }
  163. reset_cached_positions(zone);
  164. }
  165. void reset_isolation_suitable(pg_data_t *pgdat)
  166. {
  167. int zoneid;
  168. for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
  169. struct zone *zone = &pgdat->node_zones[zoneid];
  170. if (!populated_zone(zone))
  171. continue;
  172. /* Only flush if a full compaction finished recently */
  173. if (zone->compact_blockskip_flush)
  174. __reset_isolation_suitable(zone);
  175. }
  176. }
  177. /*
  178. * If no pages were isolated then mark this pageblock to be skipped in the
  179. * future. The information is later cleared by __reset_isolation_suitable().
  180. */
  181. static void update_pageblock_skip(struct compact_control *cc,
  182. struct page *page, unsigned long nr_isolated,
  183. bool migrate_scanner)
  184. {
  185. struct zone *zone = cc->zone;
  186. unsigned long pfn;
  187. if (cc->ignore_skip_hint)
  188. return;
  189. if (!page)
  190. return;
  191. if (nr_isolated)
  192. return;
  193. set_pageblock_skip(page);
  194. pfn = page_to_pfn(page);
  195. /* Update where async and sync compaction should restart */
  196. if (migrate_scanner) {
  197. if (pfn > zone->compact_cached_migrate_pfn[0])
  198. zone->compact_cached_migrate_pfn[0] = pfn;
  199. if (cc->mode != MIGRATE_ASYNC &&
  200. pfn > zone->compact_cached_migrate_pfn[1])
  201. zone->compact_cached_migrate_pfn[1] = pfn;
  202. } else {
  203. if (pfn < zone->compact_cached_free_pfn)
  204. zone->compact_cached_free_pfn = pfn;
  205. }
  206. }
  207. #else
  208. static inline bool isolation_suitable(struct compact_control *cc,
  209. struct page *page)
  210. {
  211. return true;
  212. }
  213. static void update_pageblock_skip(struct compact_control *cc,
  214. struct page *page, unsigned long nr_isolated,
  215. bool migrate_scanner)
  216. {
  217. }
  218. #endif /* CONFIG_COMPACTION */
  219. /*
  220. * Compaction requires the taking of some coarse locks that are potentially
  221. * very heavily contended. For async compaction, back out if the lock cannot
  222. * be taken immediately. For sync compaction, spin on the lock if needed.
  223. *
  224. * Returns true if the lock is held
  225. * Returns false if the lock is not held and compaction should abort
  226. */
  227. static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
  228. struct compact_control *cc)
  229. {
  230. if (cc->mode == MIGRATE_ASYNC) {
  231. if (!spin_trylock_irqsave(lock, *flags)) {
  232. cc->contended = COMPACT_CONTENDED_LOCK;
  233. return false;
  234. }
  235. } else {
  236. spin_lock_irqsave(lock, *flags);
  237. }
  238. return true;
  239. }
  240. /*
  241. * Compaction requires the taking of some coarse locks that are potentially
  242. * very heavily contended. The lock should be periodically unlocked to avoid
  243. * having disabled IRQs for a long time, even when there is nobody waiting on
  244. * the lock. It might also be that allowing the IRQs will result in
  245. * need_resched() becoming true. If scheduling is needed, async compaction
  246. * aborts. Sync compaction schedules.
  247. * Either compaction type will also abort if a fatal signal is pending.
  248. * In either case if the lock was locked, it is dropped and not regained.
  249. *
  250. * Returns true if compaction should abort due to fatal signal pending, or
  251. * async compaction due to need_resched()
  252. * Returns false when compaction can continue (sync compaction might have
  253. * scheduled)
  254. */
  255. static bool compact_unlock_should_abort(spinlock_t *lock,
  256. unsigned long flags, bool *locked, struct compact_control *cc)
  257. {
  258. if (*locked) {
  259. spin_unlock_irqrestore(lock, flags);
  260. *locked = false;
  261. }
  262. if (fatal_signal_pending(current)) {
  263. cc->contended = COMPACT_CONTENDED_SCHED;
  264. return true;
  265. }
  266. if (need_resched()) {
  267. if (cc->mode == MIGRATE_ASYNC) {
  268. cc->contended = COMPACT_CONTENDED_SCHED;
  269. return true;
  270. }
  271. cond_resched();
  272. }
  273. return false;
  274. }
  275. /*
  276. * Aside from avoiding lock contention, compaction also periodically checks
  277. * need_resched() and either schedules in sync compaction or aborts async
  278. * compaction. This is similar to what compact_unlock_should_abort() does, but
  279. * is used where no lock is concerned.
  280. *
  281. * Returns false when no scheduling was needed, or sync compaction scheduled.
  282. * Returns true when async compaction should abort.
  283. */
  284. static inline bool compact_should_abort(struct compact_control *cc)
  285. {
  286. /* async compaction aborts if contended */
  287. if (need_resched()) {
  288. if (cc->mode == MIGRATE_ASYNC) {
  289. cc->contended = COMPACT_CONTENDED_SCHED;
  290. return true;
  291. }
  292. cond_resched();
  293. }
  294. return false;
  295. }
  296. /*
  297. * Isolate free pages onto a private freelist. If @strict is true, will abort
  298. * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
  299. * (even though it may still end up isolating some pages).
  300. */
  301. static unsigned long isolate_freepages_block(struct compact_control *cc,
  302. unsigned long *start_pfn,
  303. unsigned long end_pfn,
  304. struct list_head *freelist,
  305. bool strict)
  306. {
  307. int nr_scanned = 0, total_isolated = 0;
  308. struct page *cursor, *valid_page = NULL;
  309. unsigned long flags = 0;
  310. bool locked = false;
  311. unsigned long blockpfn = *start_pfn;
  312. cursor = pfn_to_page(blockpfn);
  313. /* Isolate free pages. */
  314. for (; blockpfn < end_pfn; blockpfn++, cursor++) {
  315. int isolated, i;
  316. struct page *page = cursor;
  317. /*
  318. * Periodically drop the lock (if held) regardless of its
  319. * contention, to give chance to IRQs. Abort if fatal signal
  320. * pending or async compaction detects need_resched()
  321. */
  322. if (!(blockpfn % SWAP_CLUSTER_MAX)
  323. && compact_unlock_should_abort(&cc->zone->lock, flags,
  324. &locked, cc))
  325. break;
  326. nr_scanned++;
  327. if (!pfn_valid_within(blockpfn))
  328. goto isolate_fail;
  329. if (!valid_page)
  330. valid_page = page;
  331. /*
  332. * For compound pages such as THP and hugetlbfs, we can save
  333. * potentially a lot of iterations if we skip them at once.
  334. * The check is racy, but we can consider only valid values
  335. * and the only danger is skipping too much.
  336. */
  337. if (PageCompound(page)) {
  338. unsigned int comp_order = compound_order(page);
  339. if (likely(comp_order < MAX_ORDER)) {
  340. blockpfn += (1UL << comp_order) - 1;
  341. cursor += (1UL << comp_order) - 1;
  342. }
  343. goto isolate_fail;
  344. }
  345. if (!PageBuddy(page))
  346. goto isolate_fail;
  347. /*
  348. * If we already hold the lock, we can skip some rechecking.
  349. * Note that if we hold the lock now, checked_pageblock was
  350. * already set in some previous iteration (or strict is true),
  351. * so it is correct to skip the suitable migration target
  352. * recheck as well.
  353. */
  354. if (!locked) {
  355. /*
  356. * The zone lock must be held to isolate freepages.
  357. * Unfortunately this is a very coarse lock and can be
  358. * heavily contended if there are parallel allocations
  359. * or parallel compactions. For async compaction do not
  360. * spin on the lock and we acquire the lock as late as
  361. * possible.
  362. */
  363. locked = compact_trylock_irqsave(&cc->zone->lock,
  364. &flags, cc);
  365. if (!locked)
  366. break;
  367. /* Recheck this is a buddy page under lock */
  368. if (!PageBuddy(page))
  369. goto isolate_fail;
  370. }
  371. /* Found a free page, break it into order-0 pages */
  372. isolated = split_free_page(page);
  373. total_isolated += isolated;
  374. for (i = 0; i < isolated; i++) {
  375. list_add(&page->lru, freelist);
  376. page++;
  377. }
  378. /* If a page was split, advance to the end of it */
  379. if (isolated) {
  380. cc->nr_freepages += isolated;
  381. if (!strict &&
  382. cc->nr_migratepages <= cc->nr_freepages) {
  383. blockpfn += isolated;
  384. break;
  385. }
  386. blockpfn += isolated - 1;
  387. cursor += isolated - 1;
  388. continue;
  389. }
  390. isolate_fail:
  391. if (strict)
  392. break;
  393. else
  394. continue;
  395. }
  396. /*
  397. * There is a tiny chance that we have read bogus compound_order(),
  398. * so be careful to not go outside of the pageblock.
  399. */
  400. if (unlikely(blockpfn > end_pfn))
  401. blockpfn = end_pfn;
  402. trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
  403. nr_scanned, total_isolated);
  404. /* Record how far we have got within the block */
  405. *start_pfn = blockpfn;
  406. /*
  407. * If strict isolation is requested by CMA then check that all the
  408. * pages requested were isolated. If there were any failures, 0 is
  409. * returned and CMA will fail.
  410. */
  411. if (strict && blockpfn < end_pfn)
  412. total_isolated = 0;
  413. if (locked)
  414. spin_unlock_irqrestore(&cc->zone->lock, flags);
  415. /* Update the pageblock-skip if the whole pageblock was scanned */
  416. if (blockpfn == end_pfn)
  417. update_pageblock_skip(cc, valid_page, total_isolated, false);
  418. count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
  419. if (total_isolated)
  420. count_compact_events(COMPACTISOLATED, total_isolated);
  421. return total_isolated;
  422. }
  423. /**
  424. * isolate_freepages_range() - isolate free pages.
  425. * @start_pfn: The first PFN to start isolating.
  426. * @end_pfn: The one-past-last PFN.
  427. *
  428. * Non-free pages, invalid PFNs, or zone boundaries within the
  429. * [start_pfn, end_pfn) range are considered errors, cause function to
  430. * undo its actions and return zero.
  431. *
  432. * Otherwise, function returns one-past-the-last PFN of isolated page
  433. * (which may be greater then end_pfn if end fell in a middle of
  434. * a free page).
  435. */
  436. unsigned long
  437. isolate_freepages_range(struct compact_control *cc,
  438. unsigned long start_pfn, unsigned long end_pfn)
  439. {
  440. unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
  441. LIST_HEAD(freelist);
  442. pfn = start_pfn;
  443. block_start_pfn = pageblock_start_pfn(pfn);
  444. if (block_start_pfn < cc->zone->zone_start_pfn)
  445. block_start_pfn = cc->zone->zone_start_pfn;
  446. block_end_pfn = pageblock_end_pfn(pfn);
  447. for (; pfn < end_pfn; pfn += isolated,
  448. block_start_pfn = block_end_pfn,
  449. block_end_pfn += pageblock_nr_pages) {
  450. /* Protect pfn from changing by isolate_freepages_block */
  451. unsigned long isolate_start_pfn = pfn;
  452. block_end_pfn = min(block_end_pfn, end_pfn);
  453. /*
  454. * pfn could pass the block_end_pfn if isolated freepage
  455. * is more than pageblock order. In this case, we adjust
  456. * scanning range to right one.
  457. */
  458. if (pfn >= block_end_pfn) {
  459. block_start_pfn = pageblock_start_pfn(pfn);
  460. block_end_pfn = pageblock_end_pfn(pfn);
  461. block_end_pfn = min(block_end_pfn, end_pfn);
  462. }
  463. if (!pageblock_pfn_to_page(block_start_pfn,
  464. block_end_pfn, cc->zone))
  465. break;
  466. isolated = isolate_freepages_block(cc, &isolate_start_pfn,
  467. block_end_pfn, &freelist, true);
  468. /*
  469. * In strict mode, isolate_freepages_block() returns 0 if
  470. * there are any holes in the block (ie. invalid PFNs or
  471. * non-free pages).
  472. */
  473. if (!isolated)
  474. break;
  475. /*
  476. * If we managed to isolate pages, it is always (1 << n) *
  477. * pageblock_nr_pages for some non-negative n. (Max order
  478. * page may span two pageblocks).
  479. */
  480. }
  481. /* split_free_page does not map the pages */
  482. map_pages(&freelist);
  483. if (pfn < end_pfn) {
  484. /* Loop terminated early, cleanup. */
  485. release_freepages(&freelist);
  486. return 0;
  487. }
  488. /* We don't use freelists for anything. */
  489. return pfn;
  490. }
  491. /* Update the number of anon and file isolated pages in the zone */
  492. static void acct_isolated(struct zone *zone, struct compact_control *cc)
  493. {
  494. struct page *page;
  495. unsigned int count[2] = { 0, };
  496. if (list_empty(&cc->migratepages))
  497. return;
  498. list_for_each_entry(page, &cc->migratepages, lru)
  499. count[!!page_is_file_cache(page)]++;
  500. mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
  501. mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
  502. }
  503. /* Similar to reclaim, but different enough that they don't share logic */
  504. static bool too_many_isolated(struct zone *zone)
  505. {
  506. unsigned long active, inactive, isolated;
  507. inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
  508. zone_page_state(zone, NR_INACTIVE_ANON);
  509. active = zone_page_state(zone, NR_ACTIVE_FILE) +
  510. zone_page_state(zone, NR_ACTIVE_ANON);
  511. isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
  512. zone_page_state(zone, NR_ISOLATED_ANON);
  513. return isolated > (inactive + active) / 2;
  514. }
  515. /**
  516. * isolate_migratepages_block() - isolate all migrate-able pages within
  517. * a single pageblock
  518. * @cc: Compaction control structure.
  519. * @low_pfn: The first PFN to isolate
  520. * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
  521. * @isolate_mode: Isolation mode to be used.
  522. *
  523. * Isolate all pages that can be migrated from the range specified by
  524. * [low_pfn, end_pfn). The range is expected to be within same pageblock.
  525. * Returns zero if there is a fatal signal pending, otherwise PFN of the
  526. * first page that was not scanned (which may be both less, equal to or more
  527. * than end_pfn).
  528. *
  529. * The pages are isolated on cc->migratepages list (not required to be empty),
  530. * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
  531. * is neither read nor updated.
  532. */
  533. static unsigned long
  534. isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
  535. unsigned long end_pfn, isolate_mode_t isolate_mode)
  536. {
  537. struct zone *zone = cc->zone;
  538. unsigned long nr_scanned = 0, nr_isolated = 0;
  539. struct list_head *migratelist = &cc->migratepages;
  540. struct lruvec *lruvec;
  541. unsigned long flags = 0;
  542. bool locked = false;
  543. struct page *page = NULL, *valid_page = NULL;
  544. unsigned long start_pfn = low_pfn;
  545. /*
  546. * Ensure that there are not too many pages isolated from the LRU
  547. * list by either parallel reclaimers or compaction. If there are,
  548. * delay for some time until fewer pages are isolated
  549. */
  550. while (unlikely(too_many_isolated(zone))) {
  551. /* async migration should just abort */
  552. if (cc->mode == MIGRATE_ASYNC)
  553. return 0;
  554. congestion_wait(BLK_RW_ASYNC, HZ/10);
  555. if (fatal_signal_pending(current))
  556. return 0;
  557. }
  558. if (compact_should_abort(cc))
  559. return 0;
  560. /* Time to isolate some pages for migration */
  561. for (; low_pfn < end_pfn; low_pfn++) {
  562. bool is_lru;
  563. /*
  564. * Periodically drop the lock (if held) regardless of its
  565. * contention, to give chance to IRQs. Abort async compaction
  566. * if contended.
  567. */
  568. if (!(low_pfn % SWAP_CLUSTER_MAX)
  569. && compact_unlock_should_abort(&zone->lru_lock, flags,
  570. &locked, cc))
  571. break;
  572. if (!pfn_valid_within(low_pfn))
  573. continue;
  574. nr_scanned++;
  575. page = pfn_to_page(low_pfn);
  576. if (!valid_page)
  577. valid_page = page;
  578. /*
  579. * Skip if free. We read page order here without zone lock
  580. * which is generally unsafe, but the race window is small and
  581. * the worst thing that can happen is that we skip some
  582. * potential isolation targets.
  583. */
  584. if (PageBuddy(page)) {
  585. unsigned long freepage_order = page_order_unsafe(page);
  586. /*
  587. * Without lock, we cannot be sure that what we got is
  588. * a valid page order. Consider only values in the
  589. * valid order range to prevent low_pfn overflow.
  590. */
  591. if (freepage_order > 0 && freepage_order < MAX_ORDER)
  592. low_pfn += (1UL << freepage_order) - 1;
  593. continue;
  594. }
  595. /*
  596. * Check may be lockless but that's ok as we recheck later.
  597. * It's possible to migrate LRU pages and balloon pages
  598. * Skip any other type of page
  599. */
  600. is_lru = PageLRU(page);
  601. if (!is_lru) {
  602. if (unlikely(balloon_page_movable(page))) {
  603. if (balloon_page_isolate(page)) {
  604. /* Successfully isolated */
  605. goto isolate_success;
  606. }
  607. }
  608. }
  609. /*
  610. * Regardless of being on LRU, compound pages such as THP and
  611. * hugetlbfs are not to be compacted. We can potentially save
  612. * a lot of iterations if we skip them at once. The check is
  613. * racy, but we can consider only valid values and the only
  614. * danger is skipping too much.
  615. */
  616. if (PageCompound(page)) {
  617. unsigned int comp_order = compound_order(page);
  618. if (likely(comp_order < MAX_ORDER))
  619. low_pfn += (1UL << comp_order) - 1;
  620. continue;
  621. }
  622. if (!is_lru)
  623. continue;
  624. /*
  625. * Migration will fail if an anonymous page is pinned in memory,
  626. * so avoid taking lru_lock and isolating it unnecessarily in an
  627. * admittedly racy check.
  628. */
  629. if (!page_mapping(page) &&
  630. page_count(page) > page_mapcount(page))
  631. continue;
  632. /* If we already hold the lock, we can skip some rechecking */
  633. if (!locked) {
  634. locked = compact_trylock_irqsave(&zone->lru_lock,
  635. &flags, cc);
  636. if (!locked)
  637. break;
  638. /* Recheck PageLRU and PageCompound under lock */
  639. if (!PageLRU(page))
  640. continue;
  641. /*
  642. * Page become compound since the non-locked check,
  643. * and it's on LRU. It can only be a THP so the order
  644. * is safe to read and it's 0 for tail pages.
  645. */
  646. if (unlikely(PageCompound(page))) {
  647. low_pfn += (1UL << compound_order(page)) - 1;
  648. continue;
  649. }
  650. }
  651. lruvec = mem_cgroup_page_lruvec(page, zone);
  652. /* Try isolate the page */
  653. if (__isolate_lru_page(page, isolate_mode) != 0)
  654. continue;
  655. VM_BUG_ON_PAGE(PageCompound(page), page);
  656. /* Successfully isolated */
  657. del_page_from_lru_list(page, lruvec, page_lru(page));
  658. isolate_success:
  659. list_add(&page->lru, migratelist);
  660. cc->nr_migratepages++;
  661. nr_isolated++;
  662. /* Avoid isolating too much */
  663. if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
  664. ++low_pfn;
  665. break;
  666. }
  667. }
  668. /*
  669. * The PageBuddy() check could have potentially brought us outside
  670. * the range to be scanned.
  671. */
  672. if (unlikely(low_pfn > end_pfn))
  673. low_pfn = end_pfn;
  674. if (locked)
  675. spin_unlock_irqrestore(&zone->lru_lock, flags);
  676. /*
  677. * Update the pageblock-skip information and cached scanner pfn,
  678. * if the whole pageblock was scanned without isolating any page.
  679. */
  680. if (low_pfn == end_pfn)
  681. update_pageblock_skip(cc, valid_page, nr_isolated, true);
  682. trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
  683. nr_scanned, nr_isolated);
  684. count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
  685. if (nr_isolated)
  686. count_compact_events(COMPACTISOLATED, nr_isolated);
  687. return low_pfn;
  688. }
  689. /**
  690. * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
  691. * @cc: Compaction control structure.
  692. * @start_pfn: The first PFN to start isolating.
  693. * @end_pfn: The one-past-last PFN.
  694. *
  695. * Returns zero if isolation fails fatally due to e.g. pending signal.
  696. * Otherwise, function returns one-past-the-last PFN of isolated page
  697. * (which may be greater than end_pfn if end fell in a middle of a THP page).
  698. */
  699. unsigned long
  700. isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
  701. unsigned long end_pfn)
  702. {
  703. unsigned long pfn, block_start_pfn, block_end_pfn;
  704. /* Scan block by block. First and last block may be incomplete */
  705. pfn = start_pfn;
  706. block_start_pfn = pageblock_start_pfn(pfn);
  707. if (block_start_pfn < cc->zone->zone_start_pfn)
  708. block_start_pfn = cc->zone->zone_start_pfn;
  709. block_end_pfn = pageblock_end_pfn(pfn);
  710. for (; pfn < end_pfn; pfn = block_end_pfn,
  711. block_start_pfn = block_end_pfn,
  712. block_end_pfn += pageblock_nr_pages) {
  713. block_end_pfn = min(block_end_pfn, end_pfn);
  714. if (!pageblock_pfn_to_page(block_start_pfn,
  715. block_end_pfn, cc->zone))
  716. continue;
  717. pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
  718. ISOLATE_UNEVICTABLE);
  719. if (!pfn)
  720. break;
  721. if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
  722. break;
  723. }
  724. acct_isolated(cc->zone, cc);
  725. return pfn;
  726. }
  727. #endif /* CONFIG_COMPACTION || CONFIG_CMA */
  728. #ifdef CONFIG_COMPACTION
  729. /* Returns true if the page is within a block suitable for migration to */
  730. static bool suitable_migration_target(struct page *page)
  731. {
  732. /* If the page is a large free page, then disallow migration */
  733. if (PageBuddy(page)) {
  734. /*
  735. * We are checking page_order without zone->lock taken. But
  736. * the only small danger is that we skip a potentially suitable
  737. * pageblock, so it's not worth to check order for valid range.
  738. */
  739. if (page_order_unsafe(page) >= pageblock_order)
  740. return false;
  741. }
  742. /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
  743. if (migrate_async_suitable(get_pageblock_migratetype(page)))
  744. return true;
  745. /* Otherwise skip the block */
  746. return false;
  747. }
  748. /*
  749. * Test whether the free scanner has reached the same or lower pageblock than
  750. * the migration scanner, and compaction should thus terminate.
  751. */
  752. static inline bool compact_scanners_met(struct compact_control *cc)
  753. {
  754. return (cc->free_pfn >> pageblock_order)
  755. <= (cc->migrate_pfn >> pageblock_order);
  756. }
  757. /*
  758. * Based on information in the current compact_control, find blocks
  759. * suitable for isolating free pages from and then isolate them.
  760. */
  761. static void isolate_freepages(struct compact_control *cc)
  762. {
  763. struct zone *zone = cc->zone;
  764. struct page *page;
  765. unsigned long block_start_pfn; /* start of current pageblock */
  766. unsigned long isolate_start_pfn; /* exact pfn we start at */
  767. unsigned long block_end_pfn; /* end of current pageblock */
  768. unsigned long low_pfn; /* lowest pfn scanner is able to scan */
  769. struct list_head *freelist = &cc->freepages;
  770. /*
  771. * Initialise the free scanner. The starting point is where we last
  772. * successfully isolated from, zone-cached value, or the end of the
  773. * zone when isolating for the first time. For looping we also need
  774. * this pfn aligned down to the pageblock boundary, because we do
  775. * block_start_pfn -= pageblock_nr_pages in the for loop.
  776. * For ending point, take care when isolating in last pageblock of a
  777. * a zone which ends in the middle of a pageblock.
  778. * The low boundary is the end of the pageblock the migration scanner
  779. * is using.
  780. */
  781. isolate_start_pfn = cc->free_pfn;
  782. block_start_pfn = pageblock_start_pfn(cc->free_pfn);
  783. block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
  784. zone_end_pfn(zone));
  785. low_pfn = pageblock_end_pfn(cc->migrate_pfn);
  786. /*
  787. * Isolate free pages until enough are available to migrate the
  788. * pages on cc->migratepages. We stop searching if the migrate
  789. * and free page scanners meet or enough free pages are isolated.
  790. */
  791. for (; block_start_pfn >= low_pfn;
  792. block_end_pfn = block_start_pfn,
  793. block_start_pfn -= pageblock_nr_pages,
  794. isolate_start_pfn = block_start_pfn) {
  795. /*
  796. * This can iterate a massively long zone without finding any
  797. * suitable migration targets, so periodically check if we need
  798. * to schedule, or even abort async compaction.
  799. */
  800. if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
  801. && compact_should_abort(cc))
  802. break;
  803. page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
  804. zone);
  805. if (!page)
  806. continue;
  807. /* Check the block is suitable for migration */
  808. if (!suitable_migration_target(page))
  809. continue;
  810. /* If isolation recently failed, do not retry */
  811. if (!isolation_suitable(cc, page))
  812. continue;
  813. /* Found a block suitable for isolating free pages from. */
  814. isolate_freepages_block(cc, &isolate_start_pfn,
  815. block_end_pfn, freelist, false);
  816. /*
  817. * If we isolated enough freepages, or aborted due to async
  818. * compaction being contended, terminate the loop.
  819. * Remember where the free scanner should restart next time,
  820. * which is where isolate_freepages_block() left off.
  821. * But if it scanned the whole pageblock, isolate_start_pfn
  822. * now points at block_end_pfn, which is the start of the next
  823. * pageblock.
  824. * In that case we will however want to restart at the start
  825. * of the previous pageblock.
  826. */
  827. if ((cc->nr_freepages >= cc->nr_migratepages)
  828. || cc->contended) {
  829. if (isolate_start_pfn >= block_end_pfn)
  830. isolate_start_pfn =
  831. block_start_pfn - pageblock_nr_pages;
  832. break;
  833. } else {
  834. /*
  835. * isolate_freepages_block() should not terminate
  836. * prematurely unless contended, or isolated enough
  837. */
  838. VM_BUG_ON(isolate_start_pfn < block_end_pfn);
  839. }
  840. }
  841. /* split_free_page does not map the pages */
  842. map_pages(freelist);
  843. /*
  844. * Record where the free scanner will restart next time. Either we
  845. * broke from the loop and set isolate_start_pfn based on the last
  846. * call to isolate_freepages_block(), or we met the migration scanner
  847. * and the loop terminated due to isolate_start_pfn < low_pfn
  848. */
  849. cc->free_pfn = isolate_start_pfn;
  850. }
  851. /*
  852. * This is a migrate-callback that "allocates" freepages by taking pages
  853. * from the isolated freelists in the block we are migrating to.
  854. */
  855. static struct page *compaction_alloc(struct page *migratepage,
  856. unsigned long data,
  857. int **result)
  858. {
  859. struct compact_control *cc = (struct compact_control *)data;
  860. struct page *freepage;
  861. /*
  862. * Isolate free pages if necessary, and if we are not aborting due to
  863. * contention.
  864. */
  865. if (list_empty(&cc->freepages)) {
  866. if (!cc->contended)
  867. isolate_freepages(cc);
  868. if (list_empty(&cc->freepages))
  869. return NULL;
  870. }
  871. freepage = list_entry(cc->freepages.next, struct page, lru);
  872. list_del(&freepage->lru);
  873. cc->nr_freepages--;
  874. return freepage;
  875. }
  876. /*
  877. * This is a migrate-callback that "frees" freepages back to the isolated
  878. * freelist. All pages on the freelist are from the same zone, so there is no
  879. * special handling needed for NUMA.
  880. */
  881. static void compaction_free(struct page *page, unsigned long data)
  882. {
  883. struct compact_control *cc = (struct compact_control *)data;
  884. list_add(&page->lru, &cc->freepages);
  885. cc->nr_freepages++;
  886. }
  887. /* possible outcome of isolate_migratepages */
  888. typedef enum {
  889. ISOLATE_ABORT, /* Abort compaction now */
  890. ISOLATE_NONE, /* No pages isolated, continue scanning */
  891. ISOLATE_SUCCESS, /* Pages isolated, migrate */
  892. } isolate_migrate_t;
  893. /*
  894. * Allow userspace to control policy on scanning the unevictable LRU for
  895. * compactable pages.
  896. */
  897. int sysctl_compact_unevictable_allowed __read_mostly = 1;
  898. /*
  899. * Isolate all pages that can be migrated from the first suitable block,
  900. * starting at the block pointed to by the migrate scanner pfn within
  901. * compact_control.
  902. */
  903. static isolate_migrate_t isolate_migratepages(struct zone *zone,
  904. struct compact_control *cc)
  905. {
  906. unsigned long block_start_pfn;
  907. unsigned long block_end_pfn;
  908. unsigned long low_pfn;
  909. unsigned long isolate_start_pfn;
  910. struct page *page;
  911. const isolate_mode_t isolate_mode =
  912. (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
  913. (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
  914. /*
  915. * Start at where we last stopped, or beginning of the zone as
  916. * initialized by compact_zone()
  917. */
  918. low_pfn = cc->migrate_pfn;
  919. block_start_pfn = pageblock_start_pfn(low_pfn);
  920. if (block_start_pfn < zone->zone_start_pfn)
  921. block_start_pfn = zone->zone_start_pfn;
  922. /* Only scan within a pageblock boundary */
  923. block_end_pfn = pageblock_end_pfn(low_pfn);
  924. /*
  925. * Iterate over whole pageblocks until we find the first suitable.
  926. * Do not cross the free scanner.
  927. */
  928. for (; block_end_pfn <= cc->free_pfn;
  929. low_pfn = block_end_pfn,
  930. block_start_pfn = block_end_pfn,
  931. block_end_pfn += pageblock_nr_pages) {
  932. /*
  933. * This can potentially iterate a massively long zone with
  934. * many pageblocks unsuitable, so periodically check if we
  935. * need to schedule, or even abort async compaction.
  936. */
  937. if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
  938. && compact_should_abort(cc))
  939. break;
  940. page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
  941. zone);
  942. if (!page)
  943. continue;
  944. /* If isolation recently failed, do not retry */
  945. if (!isolation_suitable(cc, page))
  946. continue;
  947. /*
  948. * For async compaction, also only scan in MOVABLE blocks.
  949. * Async compaction is optimistic to see if the minimum amount
  950. * of work satisfies the allocation.
  951. */
  952. if (cc->mode == MIGRATE_ASYNC &&
  953. !migrate_async_suitable(get_pageblock_migratetype(page)))
  954. continue;
  955. /* Perform the isolation */
  956. isolate_start_pfn = low_pfn;
  957. low_pfn = isolate_migratepages_block(cc, low_pfn,
  958. block_end_pfn, isolate_mode);
  959. if (!low_pfn || cc->contended) {
  960. acct_isolated(zone, cc);
  961. return ISOLATE_ABORT;
  962. }
  963. /*
  964. * Record where we could have freed pages by migration and not
  965. * yet flushed them to buddy allocator.
  966. * - this is the lowest page that could have been isolated and
  967. * then freed by migration.
  968. */
  969. if (cc->nr_migratepages && !cc->last_migrated_pfn)
  970. cc->last_migrated_pfn = isolate_start_pfn;
  971. /*
  972. * Either we isolated something and proceed with migration. Or
  973. * we failed and compact_zone should decide if we should
  974. * continue or not.
  975. */
  976. break;
  977. }
  978. acct_isolated(zone, cc);
  979. /* Record where migration scanner will be restarted. */
  980. cc->migrate_pfn = low_pfn;
  981. return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
  982. }
  983. /*
  984. * order == -1 is expected when compacting via
  985. * /proc/sys/vm/compact_memory
  986. */
  987. static inline bool is_via_compact_memory(int order)
  988. {
  989. return order == -1;
  990. }
  991. static int __compact_finished(struct zone *zone, struct compact_control *cc,
  992. const int migratetype)
  993. {
  994. unsigned int order;
  995. unsigned long watermark;
  996. if (cc->contended || fatal_signal_pending(current))
  997. return COMPACT_CONTENDED;
  998. /* Compaction run completes if the migrate and free scanner meet */
  999. if (compact_scanners_met(cc)) {
  1000. /* Let the next compaction start anew. */
  1001. reset_cached_positions(zone);
  1002. /*
  1003. * Mark that the PG_migrate_skip information should be cleared
  1004. * by kswapd when it goes to sleep. kcompactd does not set the
  1005. * flag itself as the decision to be clear should be directly
  1006. * based on an allocation request.
  1007. */
  1008. if (cc->direct_compaction)
  1009. zone->compact_blockskip_flush = true;
  1010. return COMPACT_COMPLETE;
  1011. }
  1012. if (is_via_compact_memory(cc->order))
  1013. return COMPACT_CONTINUE;
  1014. /* Compaction run is not finished if the watermark is not met */
  1015. watermark = low_wmark_pages(zone);
  1016. if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
  1017. cc->alloc_flags))
  1018. return COMPACT_CONTINUE;
  1019. /* Direct compactor: Is a suitable page free? */
  1020. for (order = cc->order; order < MAX_ORDER; order++) {
  1021. struct free_area *area = &zone->free_area[order];
  1022. bool can_steal;
  1023. /* Job done if page is free of the right migratetype */
  1024. if (!list_empty(&area->free_list[migratetype]))
  1025. return COMPACT_PARTIAL;
  1026. #ifdef CONFIG_CMA
  1027. /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
  1028. if (migratetype == MIGRATE_MOVABLE &&
  1029. !list_empty(&area->free_list[MIGRATE_CMA]))
  1030. return COMPACT_PARTIAL;
  1031. #endif
  1032. /*
  1033. * Job done if allocation would steal freepages from
  1034. * other migratetype buddy lists.
  1035. */
  1036. if (find_suitable_fallback(area, order, migratetype,
  1037. true, &can_steal) != -1)
  1038. return COMPACT_PARTIAL;
  1039. }
  1040. return COMPACT_NO_SUITABLE_PAGE;
  1041. }
  1042. static int compact_finished(struct zone *zone, struct compact_control *cc,
  1043. const int migratetype)
  1044. {
  1045. int ret;
  1046. ret = __compact_finished(zone, cc, migratetype);
  1047. trace_mm_compaction_finished(zone, cc->order, ret);
  1048. if (ret == COMPACT_NO_SUITABLE_PAGE)
  1049. ret = COMPACT_CONTINUE;
  1050. return ret;
  1051. }
  1052. /*
  1053. * compaction_suitable: Is this suitable to run compaction on this zone now?
  1054. * Returns
  1055. * COMPACT_SKIPPED - If there are too few free pages for compaction
  1056. * COMPACT_PARTIAL - If the allocation would succeed without compaction
  1057. * COMPACT_CONTINUE - If compaction should run now
  1058. */
  1059. static unsigned long __compaction_suitable(struct zone *zone, int order,
  1060. int alloc_flags, int classzone_idx)
  1061. {
  1062. int fragindex;
  1063. unsigned long watermark;
  1064. if (is_via_compact_memory(order))
  1065. return COMPACT_CONTINUE;
  1066. watermark = low_wmark_pages(zone);
  1067. /*
  1068. * If watermarks for high-order allocation are already met, there
  1069. * should be no need for compaction at all.
  1070. */
  1071. if (zone_watermark_ok(zone, order, watermark, classzone_idx,
  1072. alloc_flags))
  1073. return COMPACT_PARTIAL;
  1074. /*
  1075. * Watermarks for order-0 must be met for compaction. Note the 2UL.
  1076. * This is because during migration, copies of pages need to be
  1077. * allocated and for a short time, the footprint is higher
  1078. */
  1079. watermark += (2UL << order);
  1080. if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
  1081. return COMPACT_SKIPPED;
  1082. /*
  1083. * fragmentation index determines if allocation failures are due to
  1084. * low memory or external fragmentation
  1085. *
  1086. * index of -1000 would imply allocations might succeed depending on
  1087. * watermarks, but we already failed the high-order watermark check
  1088. * index towards 0 implies failure is due to lack of memory
  1089. * index towards 1000 implies failure is due to fragmentation
  1090. *
  1091. * Only compact if a failure would be due to fragmentation.
  1092. */
  1093. fragindex = fragmentation_index(zone, order);
  1094. if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
  1095. return COMPACT_NOT_SUITABLE_ZONE;
  1096. return COMPACT_CONTINUE;
  1097. }
  1098. unsigned long compaction_suitable(struct zone *zone, int order,
  1099. int alloc_flags, int classzone_idx)
  1100. {
  1101. unsigned long ret;
  1102. ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
  1103. trace_mm_compaction_suitable(zone, order, ret);
  1104. if (ret == COMPACT_NOT_SUITABLE_ZONE)
  1105. ret = COMPACT_SKIPPED;
  1106. return ret;
  1107. }
  1108. static int compact_zone(struct zone *zone, struct compact_control *cc)
  1109. {
  1110. int ret;
  1111. unsigned long start_pfn = zone->zone_start_pfn;
  1112. unsigned long end_pfn = zone_end_pfn(zone);
  1113. const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
  1114. const bool sync = cc->mode != MIGRATE_ASYNC;
  1115. ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
  1116. cc->classzone_idx);
  1117. switch (ret) {
  1118. case COMPACT_PARTIAL:
  1119. case COMPACT_SKIPPED:
  1120. /* Compaction is likely to fail */
  1121. return ret;
  1122. case COMPACT_CONTINUE:
  1123. /* Fall through to compaction */
  1124. ;
  1125. }
  1126. /*
  1127. * Clear pageblock skip if there were failures recently and compaction
  1128. * is about to be retried after being deferred.
  1129. */
  1130. if (compaction_restarting(zone, cc->order))
  1131. __reset_isolation_suitable(zone);
  1132. /*
  1133. * Setup to move all movable pages to the end of the zone. Used cached
  1134. * information on where the scanners should start but check that it
  1135. * is initialised by ensuring the values are within zone boundaries.
  1136. */
  1137. cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
  1138. cc->free_pfn = zone->compact_cached_free_pfn;
  1139. if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
  1140. cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
  1141. zone->compact_cached_free_pfn = cc->free_pfn;
  1142. }
  1143. if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
  1144. cc->migrate_pfn = start_pfn;
  1145. zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
  1146. zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
  1147. }
  1148. cc->last_migrated_pfn = 0;
  1149. trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
  1150. cc->free_pfn, end_pfn, sync);
  1151. migrate_prep_local();
  1152. while ((ret = compact_finished(zone, cc, migratetype)) ==
  1153. COMPACT_CONTINUE) {
  1154. int err;
  1155. switch (isolate_migratepages(zone, cc)) {
  1156. case ISOLATE_ABORT:
  1157. ret = COMPACT_CONTENDED;
  1158. putback_movable_pages(&cc->migratepages);
  1159. cc->nr_migratepages = 0;
  1160. goto out;
  1161. case ISOLATE_NONE:
  1162. /*
  1163. * We haven't isolated and migrated anything, but
  1164. * there might still be unflushed migrations from
  1165. * previous cc->order aligned block.
  1166. */
  1167. goto check_drain;
  1168. case ISOLATE_SUCCESS:
  1169. ;
  1170. }
  1171. err = migrate_pages(&cc->migratepages, compaction_alloc,
  1172. compaction_free, (unsigned long)cc, cc->mode,
  1173. MR_COMPACTION);
  1174. trace_mm_compaction_migratepages(cc->nr_migratepages, err,
  1175. &cc->migratepages);
  1176. /* All pages were either migrated or will be released */
  1177. cc->nr_migratepages = 0;
  1178. if (err) {
  1179. putback_movable_pages(&cc->migratepages);
  1180. /*
  1181. * migrate_pages() may return -ENOMEM when scanners meet
  1182. * and we want compact_finished() to detect it
  1183. */
  1184. if (err == -ENOMEM && !compact_scanners_met(cc)) {
  1185. ret = COMPACT_CONTENDED;
  1186. goto out;
  1187. }
  1188. }
  1189. check_drain:
  1190. /*
  1191. * Has the migration scanner moved away from the previous
  1192. * cc->order aligned block where we migrated from? If yes,
  1193. * flush the pages that were freed, so that they can merge and
  1194. * compact_finished() can detect immediately if allocation
  1195. * would succeed.
  1196. */
  1197. if (cc->order > 0 && cc->last_migrated_pfn) {
  1198. int cpu;
  1199. unsigned long current_block_start =
  1200. block_start_pfn(cc->migrate_pfn, cc->order);
  1201. if (cc->last_migrated_pfn < current_block_start) {
  1202. cpu = get_cpu();
  1203. lru_add_drain_cpu(cpu);
  1204. drain_local_pages(zone);
  1205. put_cpu();
  1206. /* No more flushing until we migrate again */
  1207. cc->last_migrated_pfn = 0;
  1208. }
  1209. }
  1210. }
  1211. out:
  1212. /*
  1213. * Release free pages and update where the free scanner should restart,
  1214. * so we don't leave any returned pages behind in the next attempt.
  1215. */
  1216. if (cc->nr_freepages > 0) {
  1217. unsigned long free_pfn = release_freepages(&cc->freepages);
  1218. cc->nr_freepages = 0;
  1219. VM_BUG_ON(free_pfn == 0);
  1220. /* The cached pfn is always the first in a pageblock */
  1221. free_pfn = pageblock_start_pfn(free_pfn);
  1222. /*
  1223. * Only go back, not forward. The cached pfn might have been
  1224. * already reset to zone end in compact_finished()
  1225. */
  1226. if (free_pfn > zone->compact_cached_free_pfn)
  1227. zone->compact_cached_free_pfn = free_pfn;
  1228. }
  1229. trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
  1230. cc->free_pfn, end_pfn, sync, ret);
  1231. if (ret == COMPACT_CONTENDED)
  1232. ret = COMPACT_PARTIAL;
  1233. return ret;
  1234. }
  1235. static unsigned long compact_zone_order(struct zone *zone, int order,
  1236. gfp_t gfp_mask, enum migrate_mode mode, int *contended,
  1237. int alloc_flags, int classzone_idx)
  1238. {
  1239. unsigned long ret;
  1240. struct compact_control cc = {
  1241. .nr_freepages = 0,
  1242. .nr_migratepages = 0,
  1243. .order = order,
  1244. .gfp_mask = gfp_mask,
  1245. .zone = zone,
  1246. .mode = mode,
  1247. .alloc_flags = alloc_flags,
  1248. .classzone_idx = classzone_idx,
  1249. .direct_compaction = true,
  1250. };
  1251. INIT_LIST_HEAD(&cc.freepages);
  1252. INIT_LIST_HEAD(&cc.migratepages);
  1253. ret = compact_zone(zone, &cc);
  1254. VM_BUG_ON(!list_empty(&cc.freepages));
  1255. VM_BUG_ON(!list_empty(&cc.migratepages));
  1256. *contended = cc.contended;
  1257. return ret;
  1258. }
int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that determines if compaction was aborted due to
 *	       need_resched() or lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_DEFERRED;
	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */

	*contended = COMPACT_CONTENDED_NONE;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		int status;
		int zone_contended;

		if (compaction_deferred(zone, order))
			continue;

		status = compact_zone_order(zone, order, gfp_mask, mode,
				&zone_contended, alloc_flags,
				ac->classzone_idx);
		rc = max(status, rc);
		/*
		 * It takes at least one zone that wasn't lock contended
		 * to clear all_zones_contended.
		 */
		all_zones_contended &= zone_contended;

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
					ac->classzone_idx, alloc_flags)) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);
			/*
			 * It is possible that async compaction aborted due to
			 * need_resched() and the watermarks were ok thanks to
			 * somebody else freeing memory. The allocation can
			 * however still fail so we better signal the
			 * need_resched() contention anyway (this will not
			 * prevent the allocation attempt).
			 */
			if (zone_contended == COMPACT_CONTENDED_SCHED)
				*contended = COMPACT_CONTENDED_SCHED;

			goto break_loop;
		}

		if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);
		}

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones and signal need_resched()
		 * contention.
		 */
		if ((zone_contended == COMPACT_CONTENDED_SCHED)
					|| fatal_signal_pending(current)) {
			*contended = COMPACT_CONTENDED_SCHED;
			goto break_loop;
		}

		continue;
break_loop:
		/*
		 * We might not have tried all the zones, so be conservative
		 * and assume they are not all lock contended.
		 */
		all_zones_contended = 0;
		break;
	}

	/*
	 * If at least one zone wasn't deferred or skipped, we report if all
	 * zones that were tried were lock contended.
	 */
	if (rc > COMPACT_SKIPPED && all_zones_contended)
		*contended = COMPACT_CONTENDED_LOCK;

	return rc;
}

/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		/*
		 * When called via /proc/sys/vm/compact_memory
		 * this makes sure we compact the whole zone regardless of
		 * cached scanner positions.
		 */
		if (is_via_compact_memory(cc->order))
			__reset_isolation_suitable(zone);

		if (is_via_compact_memory(cc->order) ||
				!compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));

		if (is_via_compact_memory(cc->order))
			continue;

		if (zone_watermark_ok(zone, cc->order,
				low_wmark_pages(zone), 0, 0))
			compaction_defer_reset(zone, cc->order, false);
	}
}
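
/*
 * Opportunistically compact all zones of a node asynchronously for the given
 * order; a no-op for order-0 requests. Intended for background callers such
 * as kswapd rather than for direct reclaim.
 */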
void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}
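
/*
 * Fully compact one node: order -1 is treated as "compact everything"
 * (is_via_compact_memory()), using sync migration and ignoring pageblock
 * skip hints.
 */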
static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}
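
/*
 * Handler for /proc/sys/vm/extfrag_threshold: just range-checks and stores
 * the written value via proc_dointvec_minmax(). For example (assuming the
 * usual procfs mount point):
 *
 *	echo 500 > /proc/sys/vm/extfrag_threshold
 */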
int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
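
/*
 * Per-node sysfs trigger: writing anything to the node's "compact" attribute
 * (typically /sys/devices/system/node/nodeN/compact) fully compacts that
 * node via compact_node().
 */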
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
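
/*
 * True when kcompactd has pending work (a non-zero requested order) or should
 * exit; used as the wait_event_freezable() condition in kcompactd().
 */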
static inline bool kcompactd_work_requested(pg_data_t *pgdat)
{
	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
}
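
/*
 * Check whether any populated zone below the requested classzone_idx looks
 * suitable for compaction at kcompactd_max_order, i.e. whether waking
 * kcompactd would have anything useful to do.
 */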
static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;

	for (zoneid = 0; zoneid < classzone_idx; zoneid++) {
		zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
					classzone_idx) == COMPACT_CONTINUE)
			return true;
	}

	return false;
}
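
/*
 * One kcompactd work pass: compact each eligible zone of the node with
 * MIGRATE_SYNC_LIGHT until a page of the requested order is allocatable,
 * deferring zones where compaction completed without meeting the watermark.
 */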
static void kcompactd_do_work(pg_data_t *pgdat)
{
	/*
	 * With no special task, compact all zones so that a page of requested
	 * order is allocatable.
	 */
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = pgdat->kcompactd_max_order,
		.classzone_idx = pgdat->kcompactd_classzone_idx,
		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = true,
	};
	bool success = false;

	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
							cc.classzone_idx);
	count_vm_event(KCOMPACTD_WAKE);

	for (zoneid = 0; zoneid < cc.classzone_idx; zoneid++) {
		int status;

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		if (compaction_deferred(zone, cc.order))
			continue;

		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
						COMPACT_CONTINUE)
			continue;

		cc.nr_freepages = 0;
		cc.nr_migratepages = 0;
		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		if (kthread_should_stop())
			return;
		status = compact_zone(zone, &cc);

		if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
						cc.classzone_idx, 0)) {
			success = true;
			compaction_defer_reset(zone, cc.order, false);
		} else if (status == COMPACT_COMPLETE) {
			/*
			 * We use sync migration mode here, so we defer like
			 * sync direct compaction does.
			 */
			defer_compaction(zone, cc.order);
		}

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	/*
	 * Regardless of success, we are done until woken up next. But remember
	 * the requested order/classzone_idx in case it was higher/tighter than
	 * our current ones
	 */
	if (pgdat->kcompactd_max_order <= cc.order)
		pgdat->kcompactd_max_order = 0;
	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
}
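
/*
 * Record the highest order / most restrictive classzone_idx requested so far
 * and wake the node's kcompactd if it is sleeping and at least one zone looks
 * suitable. Typically called from the allocator's kswapd wakeup path for
 * high-order allocations.
 */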
void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
	if (!order)
		return;

	if (pgdat->kcompactd_max_order < order)
		pgdat->kcompactd_max_order = order;

	if (pgdat->kcompactd_classzone_idx > classzone_idx)
		pgdat->kcompactd_classzone_idx = classzone_idx;

	if (!waitqueue_active(&pgdat->kcompactd_wait))
		return;

	if (!kcompactd_node_suitable(pgdat))
		return;

	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
							classzone_idx);
	wake_up_interruptible(&pgdat->kcompactd_wait);
}

/*
 * The background compaction daemon, started as a kernel thread
 * from the init process.
 */
static int kcompactd(void *p)
{
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	set_freezable();

	pgdat->kcompactd_max_order = 0;
	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;

	while (!kthread_should_stop()) {
		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
		wait_event_freezable(pgdat->kcompactd_wait,
				kcompactd_work_requested(pgdat));

		kcompactd_do_work(pgdat);
	}

	return 0;
}

/*
 * This kcompactd start function will be called by init and node-hot-add.
 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kcompactd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kcompactd)
		return 0;

	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
	if (IS_ERR(pgdat->kcompactd)) {
		pr_err("Failed to start kcompactd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kcompactd);
		pgdat->kcompactd = NULL;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold mem_hotplug_begin/end().
 */
void kcompactd_stop(int nid)
{
	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;

	if (kcompactd) {
		kthread_stop(kcompactd);
		NODE_DATA(nid)->kcompactd = NULL;
	}
}

/*
 * It's optimal to keep kcompactd threads on the same CPUs as their node's
 * memory, but not required for correctness. So if the last cpu in a node goes
 * away, we get changed to run anywhere: as the first one comes back, restore
 * their cpu bindings.
 */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kcompactd, mask);
		}
	}
	return NOTIFY_OK;
}
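
/*
 * Boot-time setup: start a kcompactd thread for every node with memory and
 * register the CPU-online notifier above so the threads regain their node's
 * CPU affinity after hotplug.
 */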
static int __init kcompactd_init(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY)
		kcompactd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}
subsys_initcall(kcompactd_init)

#endif /* CONFIG_COMPACTION */