compaction.c

  1. /*
  2. * linux/mm/compaction.c
  3. *
  4. * Memory compaction for the reduction of external fragmentation. Note that
  5. * this heavily depends upon page migration to do all the real heavy
  6. * lifting
  7. *
  8. * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
  9. */
  10. #include <linux/cpu.h>
  11. #include <linux/swap.h>
  12. #include <linux/migrate.h>
  13. #include <linux/compaction.h>
  14. #include <linux/mm_inline.h>
  15. #include <linux/backing-dev.h>
  16. #include <linux/sysctl.h>
  17. #include <linux/sysfs.h>
  18. #include <linux/page-isolation.h>
  19. #include <linux/kasan.h>
  20. #include <linux/kthread.h>
  21. #include <linux/freezer.h>
  22. #include <linux/page_owner.h>
  23. #include "internal.h"
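/*
 * Compaction vmstat events are only accounted when CONFIG_COMPACTION is
 * enabled; in CMA-only builds the helpers below compile away to no-ops.
 */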
  24. #ifdef CONFIG_COMPACTION
  25. static inline void count_compact_event(enum vm_event_item item)
  26. {
  27. count_vm_event(item);
  28. }
  29. static inline void count_compact_events(enum vm_event_item item, long delta)
  30. {
  31. count_vm_events(item, delta);
  32. }
  33. #else
  34. #define count_compact_event(item) do { } while (0)
  35. #define count_compact_events(item, delta) do { } while (0)
  36. #endif
  37. #if defined CONFIG_COMPACTION || defined CONFIG_CMA
  38. #define CREATE_TRACE_POINTS
  39. #include <trace/events/compaction.h>
  40. #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
  41. #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
  42. #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
  43. #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
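/*
 * Return isolated free pages to the buddy allocator and report the highest
 * PFN that was released, so the caller can update the cached free-scanner
 * position.
 */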
  44. static unsigned long release_freepages(struct list_head *freelist)
  45. {
  46. struct page *page, *next;
  47. unsigned long high_pfn = 0;
  48. list_for_each_entry_safe(page, next, freelist, lru) {
  49. unsigned long pfn = page_to_pfn(page);
  50. list_del(&page->lru);
  51. __free_page(page);
  52. if (pfn > high_pfn)
  53. high_pfn = pfn;
  54. }
  55. return high_pfn;
  56. }
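/*
 * Finish the post-allocation setup of pages taken off the buddy lists by
 * __isolate_free_page(): run post_alloc_hook() with the order stashed in
 * page_private, split high-order pages into order-0 pages and put them all
 * back on the list.
 */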
  57. static void map_pages(struct list_head *list)
  58. {
  59. unsigned int i, order, nr_pages;
  60. struct page *page, *next;
  61. LIST_HEAD(tmp_list);
  62. list_for_each_entry_safe(page, next, list, lru) {
  63. list_del(&page->lru);
  64. order = page_private(page);
  65. nr_pages = 1 << order;
  66. post_alloc_hook(page, order, __GFP_MOVABLE);
  67. if (order)
  68. split_page(page, order);
  69. for (i = 0; i < nr_pages; i++) {
  70. list_add(&page->lru, &tmp_list);
  71. page++;
  72. }
  73. }
  74. list_splice(&tmp_list, list);
  75. }
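/*
 * MIGRATE_MOVABLE and MIGRATE_CMA pageblocks are the only ones the async
 * migration scanner works with, and the only ones the free scanner treats
 * as suitable migration targets.
 */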
  76. static inline bool migrate_async_suitable(int migratetype)
  77. {
  78. return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
  79. }
  80. #ifdef CONFIG_COMPACTION
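/*
 * A non-LRU page is movable if its driver registered an address_space with
 * an isolate_page method via __SetPageMovable(). The page must be locked so
 * the mapping cannot change underneath us.
 */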
  81. int PageMovable(struct page *page)
  82. {
  83. struct address_space *mapping;
  84. VM_BUG_ON_PAGE(!PageLocked(page), page);
  85. if (!__PageMovable(page))
  86. return 0;
  87. mapping = page_mapping(page);
  88. if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
  89. return 1;
  90. return 0;
  91. }
  92. EXPORT_SYMBOL(PageMovable);
  93. void __SetPageMovable(struct page *page, struct address_space *mapping)
  94. {
  95. VM_BUG_ON_PAGE(!PageLocked(page), page);
  96. VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
  97. page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
  98. }
  99. EXPORT_SYMBOL(__SetPageMovable);
  100. void __ClearPageMovable(struct page *page)
  101. {
  102. VM_BUG_ON_PAGE(!PageLocked(page), page);
  103. VM_BUG_ON_PAGE(!PageMovable(page), page);
  104. /*
  105. * Clear the registered address_space value while keeping the
  106. * PAGE_MAPPING_MOVABLE flag, so the VM can detect a page the driver has
  107. * released after isolation and migration will not try to put it back.
  108. */
  109. page->mapping = (void *)((unsigned long)page->mapping &
  110. PAGE_MAPPING_MOVABLE);
  111. }
  112. EXPORT_SYMBOL(__ClearPageMovable);
  113. /* Do not skip compaction more than 64 times */
  114. #define COMPACT_MAX_DEFER_SHIFT 6
  115. /*
  116. * Compaction is deferred when compaction fails to result in a page
  117. * allocation success. 1 << compact_defer_limit compactions are skipped up
  118. * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
  119. */
  120. void defer_compaction(struct zone *zone, int order)
  121. {
  122. zone->compact_considered = 0;
  123. zone->compact_defer_shift++;
  124. if (order < zone->compact_order_failed)
  125. zone->compact_order_failed = order;
  126. if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
  127. zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
  128. trace_mm_compaction_defer_compaction(zone, order);
  129. }
  130. /* Returns true if compaction should be skipped this time */
  131. bool compaction_deferred(struct zone *zone, int order)
  132. {
  133. unsigned long defer_limit = 1UL << zone->compact_defer_shift;
  134. if (order < zone->compact_order_failed)
  135. return false;
  136. /* Avoid possible overflow */
  137. if (++zone->compact_considered > defer_limit)
  138. zone->compact_considered = defer_limit;
  139. if (zone->compact_considered >= defer_limit)
  140. return false;
  141. trace_mm_compaction_deferred(zone, order);
  142. return true;
  143. }
  144. /*
  145. * Update defer tracking counters after successful compaction of given order,
  146. * which means an allocation either succeeded (alloc_success == true) or is
  147. * expected to succeed.
  148. */
  149. void compaction_defer_reset(struct zone *zone, int order,
  150. bool alloc_success)
  151. {
  152. if (alloc_success) {
  153. zone->compact_considered = 0;
  154. zone->compact_defer_shift = 0;
  155. }
  156. if (order >= zone->compact_order_failed)
  157. zone->compact_order_failed = order + 1;
  158. trace_mm_compaction_defer_reset(zone, order);
  159. }
  160. /* Returns true if restarting compaction after many failures */
  161. bool compaction_restarting(struct zone *zone, int order)
  162. {
  163. if (order < zone->compact_order_failed)
  164. return false;
  165. return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
  166. zone->compact_considered >= 1UL << zone->compact_defer_shift;
  167. }
  168. /* Returns true if the pageblock should be scanned for pages to isolate. */
  169. static inline bool isolation_suitable(struct compact_control *cc,
  170. struct page *page)
  171. {
  172. if (cc->ignore_skip_hint)
  173. return true;
  174. return !get_pageblock_skip(page);
  175. }
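/*
 * Rewind the cached scanner positions: the migrate scanners to the start of
 * the zone and the free scanner to the zone's last pageblock.
 */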
  176. static void reset_cached_positions(struct zone *zone)
  177. {
  178. zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
  179. zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
  180. zone->compact_cached_free_pfn =
  181. pageblock_start_pfn(zone_end_pfn(zone) - 1);
  182. }
  183. /*
  184. * This function is called to clear all cached information on pageblocks that
  185. * should be skipped for page isolation when the migrate and free page scanner
  186. * meet.
  187. */
  188. static void __reset_isolation_suitable(struct zone *zone)
  189. {
  190. unsigned long start_pfn = zone->zone_start_pfn;
  191. unsigned long end_pfn = zone_end_pfn(zone);
  192. unsigned long pfn;
  193. zone->compact_blockskip_flush = false;
  194. /* Walk the zone and mark every pageblock as suitable for isolation */
  195. for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  196. struct page *page;
  197. cond_resched();
  198. if (!pfn_valid(pfn))
  199. continue;
  200. page = pfn_to_page(pfn);
  201. if (zone != page_zone(page))
  202. continue;
  203. clear_pageblock_skip(page);
  204. }
  205. reset_cached_positions(zone);
  206. }
  207. void reset_isolation_suitable(pg_data_t *pgdat)
  208. {
  209. int zoneid;
  210. for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
  211. struct zone *zone = &pgdat->node_zones[zoneid];
  212. if (!populated_zone(zone))
  213. continue;
  214. /* Only flush if a full compaction finished recently */
  215. if (zone->compact_blockskip_flush)
  216. __reset_isolation_suitable(zone);
  217. }
  218. }
  219. /*
  220. * If no pages were isolated then mark this pageblock to be skipped in the
  221. * future. The information is later cleared by __reset_isolation_suitable().
  222. */
  223. static void update_pageblock_skip(struct compact_control *cc,
  224. struct page *page, unsigned long nr_isolated,
  225. bool migrate_scanner)
  226. {
  227. struct zone *zone = cc->zone;
  228. unsigned long pfn;
  229. if (cc->ignore_skip_hint)
  230. return;
  231. if (!page)
  232. return;
  233. if (nr_isolated)
  234. return;
  235. set_pageblock_skip(page);
  236. pfn = page_to_pfn(page);
  237. /* Update where async and sync compaction should restart */
  238. if (migrate_scanner) {
  239. if (pfn > zone->compact_cached_migrate_pfn[0])
  240. zone->compact_cached_migrate_pfn[0] = pfn;
  241. if (cc->mode != MIGRATE_ASYNC &&
  242. pfn > zone->compact_cached_migrate_pfn[1])
  243. zone->compact_cached_migrate_pfn[1] = pfn;
  244. } else {
  245. if (pfn < zone->compact_cached_free_pfn)
  246. zone->compact_cached_free_pfn = pfn;
  247. }
  248. }
  249. #else
  250. static inline bool isolation_suitable(struct compact_control *cc,
  251. struct page *page)
  252. {
  253. return true;
  254. }
  255. static void update_pageblock_skip(struct compact_control *cc,
  256. struct page *page, unsigned long nr_isolated,
  257. bool migrate_scanner)
  258. {
  259. }
  260. #endif /* CONFIG_COMPACTION */
  261. /*
  262. * Compaction requires the taking of some coarse locks that are potentially
  263. * very heavily contended. For async compaction, back out if the lock cannot
  264. * be taken immediately. For sync compaction, spin on the lock if needed.
  265. *
  266. * Returns true if the lock is held
  267. * Returns false if the lock is not held and compaction should abort
  268. */
  269. static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
  270. struct compact_control *cc)
  271. {
  272. if (cc->mode == MIGRATE_ASYNC) {
  273. if (!spin_trylock_irqsave(lock, *flags)) {
  274. cc->contended = true;
  275. return false;
  276. }
  277. } else {
  278. spin_lock_irqsave(lock, *flags);
  279. }
  280. return true;
  281. }
  282. /*
  283. * Compaction requires the taking of some coarse locks that are potentially
  284. * very heavily contended. The lock should be periodically unlocked to avoid
  285. * having disabled IRQs for a long time, even when there is nobody waiting on
  286. * the lock. It might also be that allowing the IRQs will result in
  287. * need_resched() becoming true. If scheduling is needed, async compaction
  288. * aborts. Sync compaction schedules.
  289. * Either compaction type will also abort if a fatal signal is pending.
  290. * In either case if the lock was locked, it is dropped and not regained.
  291. *
  292. * Returns true if compaction should abort due to fatal signal pending, or
  293. * async compaction due to need_resched()
  294. * Returns false when compaction can continue (sync compaction might have
  295. * scheduled)
  296. */
  297. static bool compact_unlock_should_abort(spinlock_t *lock,
  298. unsigned long flags, bool *locked, struct compact_control *cc)
  299. {
  300. if (*locked) {
  301. spin_unlock_irqrestore(lock, flags);
  302. *locked = false;
  303. }
  304. if (fatal_signal_pending(current)) {
  305. cc->contended = true;
  306. return true;
  307. }
  308. if (need_resched()) {
  309. if (cc->mode == MIGRATE_ASYNC) {
  310. cc->contended = true;
  311. return true;
  312. }
  313. cond_resched();
  314. }
  315. return false;
  316. }
  317. /*
  318. * Aside from avoiding lock contention, compaction also periodically checks
  319. * need_resched() and either schedules in sync compaction or aborts async
  320. * compaction. This is similar to what compact_unlock_should_abort() does, but
  321. * is used where no lock is concerned.
  322. *
  323. * Returns false when no scheduling was needed, or sync compaction scheduled.
  324. * Returns true when async compaction should abort.
  325. */
  326. static inline bool compact_should_abort(struct compact_control *cc)
  327. {
  328. /* async compaction aborts if contended */
  329. if (need_resched()) {
  330. if (cc->mode == MIGRATE_ASYNC) {
  331. cc->contended = true;
  332. return true;
  333. }
  334. cond_resched();
  335. }
  336. return false;
  337. }
  338. /*
  339. * Isolate free pages onto a private freelist. If @strict is true, will abort
  340. * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
  341. * (even though it may still end up isolating some pages).
  342. */
  343. static unsigned long isolate_freepages_block(struct compact_control *cc,
  344. unsigned long *start_pfn,
  345. unsigned long end_pfn,
  346. struct list_head *freelist,
  347. bool strict)
  348. {
  349. int nr_scanned = 0, total_isolated = 0;
  350. struct page *cursor, *valid_page = NULL;
  351. unsigned long flags = 0;
  352. bool locked = false;
  353. unsigned long blockpfn = *start_pfn;
  354. unsigned int order;
  355. cursor = pfn_to_page(blockpfn);
  356. /* Isolate free pages. */
  357. for (; blockpfn < end_pfn; blockpfn++, cursor++) {
  358. int isolated;
  359. struct page *page = cursor;
  360. /*
  361. * Periodically drop the lock (if held) regardless of its
  362. * contention, to give chance to IRQs. Abort if fatal signal
  363. * pending or async compaction detects need_resched()
  364. */
  365. if (!(blockpfn % SWAP_CLUSTER_MAX)
  366. && compact_unlock_should_abort(&cc->zone->lock, flags,
  367. &locked, cc))
  368. break;
  369. nr_scanned++;
  370. if (!pfn_valid_within(blockpfn))
  371. goto isolate_fail;
  372. if (!valid_page)
  373. valid_page = page;
  374. /*
  375. * For compound pages such as THP and hugetlbfs, we can save
  376. * potentially a lot of iterations if we skip them at once.
  377. * The check is racy, but we can consider only valid values
  378. * and the only danger is skipping too much.
  379. */
  380. if (PageCompound(page)) {
  381. unsigned int comp_order = compound_order(page);
  382. if (likely(comp_order < MAX_ORDER)) {
  383. blockpfn += (1UL << comp_order) - 1;
  384. cursor += (1UL << comp_order) - 1;
  385. }
  386. goto isolate_fail;
  387. }
  388. if (!PageBuddy(page))
  389. goto isolate_fail;
  390. /*
  391. * If we already hold the lock, we can skip some rechecking.
  392. * Note that if we hold the lock now, checked_pageblock was
  393. * already set in some previous iteration (or strict is true),
  394. * so it is correct to skip the suitable migration target
  395. * recheck as well.
  396. */
  397. if (!locked) {
  398. /*
  399. * The zone lock must be held to isolate freepages.
  400. * Unfortunately this is a very coarse lock and can be
  401. * heavily contended if there are parallel allocations
  402. * or parallel compactions. For async compaction we do not
  403. * spin on the lock, and we acquire it as late as
  404. * possible.
  405. */
  406. locked = compact_trylock_irqsave(&cc->zone->lock,
  407. &flags, cc);
  408. if (!locked)
  409. break;
  410. /* Recheck this is a buddy page under lock */
  411. if (!PageBuddy(page))
  412. goto isolate_fail;
  413. }
  414. /* Found a free page, will break it into order-0 pages */
  415. order = page_order(page);
  416. isolated = __isolate_free_page(page, order);
  417. if (!isolated)
  418. break;
  419. set_page_private(page, order);
  420. total_isolated += isolated;
  421. cc->nr_freepages += isolated;
  422. list_add_tail(&page->lru, freelist);
  423. if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
  424. blockpfn += isolated;
  425. break;
  426. }
  427. /* Advance to the end of split page */
  428. blockpfn += isolated - 1;
  429. cursor += isolated - 1;
  430. continue;
  431. isolate_fail:
  432. if (strict)
  433. break;
  434. else
  435. continue;
  436. }
  437. if (locked)
  438. spin_unlock_irqrestore(&cc->zone->lock, flags);
  439. /*
  440. * There is a tiny chance that we have read bogus compound_order(),
  441. * so be careful to not go outside of the pageblock.
  442. */
  443. if (unlikely(blockpfn > end_pfn))
  444. blockpfn = end_pfn;
  445. trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
  446. nr_scanned, total_isolated);
  447. /* Record how far we have got within the block */
  448. *start_pfn = blockpfn;
  449. /*
  450. * If strict isolation is requested by CMA then check that all the
  451. * pages requested were isolated. If there were any failures, 0 is
  452. * returned and CMA will fail.
  453. */
  454. if (strict && blockpfn < end_pfn)
  455. total_isolated = 0;
  456. /* Update the pageblock-skip if the whole pageblock was scanned */
  457. if (blockpfn == end_pfn)
  458. update_pageblock_skip(cc, valid_page, total_isolated, false);
  459. count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
  460. if (total_isolated)
  461. count_compact_events(COMPACTISOLATED, total_isolated);
  462. return total_isolated;
  463. }
  464. /**
  465. * isolate_freepages_range() - isolate free pages.
  466. * @start_pfn: The first PFN to start isolating.
  467. * @end_pfn: The one-past-last PFN.
  468. *
  469. * Non-free pages, invalid PFNs, or zone boundaries within the
  470. * [start_pfn, end_pfn) range are considered errors, and cause the function
  471. * to undo its actions and return zero.
  472. *
  473. * Otherwise, the function returns the one-past-the-last PFN of the isolated
  474. * page (which may be greater than end_pfn if the end fell in the middle of
  475. * a free page).
  476. */
  477. unsigned long
  478. isolate_freepages_range(struct compact_control *cc,
  479. unsigned long start_pfn, unsigned long end_pfn)
  480. {
  481. unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
  482. LIST_HEAD(freelist);
  483. pfn = start_pfn;
  484. block_start_pfn = pageblock_start_pfn(pfn);
  485. if (block_start_pfn < cc->zone->zone_start_pfn)
  486. block_start_pfn = cc->zone->zone_start_pfn;
  487. block_end_pfn = pageblock_end_pfn(pfn);
  488. for (; pfn < end_pfn; pfn += isolated,
  489. block_start_pfn = block_end_pfn,
  490. block_end_pfn += pageblock_nr_pages) {
  491. /* Protect pfn from changing by isolate_freepages_block */
  492. unsigned long isolate_start_pfn = pfn;
  493. block_end_pfn = min(block_end_pfn, end_pfn);
  494. /*
  495. * pfn could pass block_end_pfn if the isolated free page
  496. * is larger than pageblock order. In this case, adjust the
  497. * scanning range to the right pageblock.
  498. */
  499. if (pfn >= block_end_pfn) {
  500. block_start_pfn = pageblock_start_pfn(pfn);
  501. block_end_pfn = pageblock_end_pfn(pfn);
  502. block_end_pfn = min(block_end_pfn, end_pfn);
  503. }
  504. if (!pageblock_pfn_to_page(block_start_pfn,
  505. block_end_pfn, cc->zone))
  506. break;
  507. isolated = isolate_freepages_block(cc, &isolate_start_pfn,
  508. block_end_pfn, &freelist, true);
  509. /*
  510. * In strict mode, isolate_freepages_block() returns 0 if
  511. * there are any holes in the block (ie. invalid PFNs or
  512. * non-free pages).
  513. */
  514. if (!isolated)
  515. break;
  516. /*
  517. * If we managed to isolate pages, it is always (1 << n) *
  518. * pageblock_nr_pages for some non-negative n. (Max order
  519. * page may span two pageblocks).
  520. */
  521. }
  522. /* __isolate_free_page() does not map the pages */
  523. map_pages(&freelist);
  524. if (pfn < end_pfn) {
  525. /* Loop terminated early, cleanup. */
  526. release_freepages(&freelist);
  527. return 0;
  528. }
  529. /* We don't use freelists for anything. */
  530. return pfn;
  531. }
  532. /* Update the number of anon and file isolated pages in the zone */
  533. static void acct_isolated(struct zone *zone, struct compact_control *cc)
  534. {
  535. struct page *page;
  536. unsigned int count[2] = { 0, };
  537. if (list_empty(&cc->migratepages))
  538. return;
  539. list_for_each_entry(page, &cc->migratepages, lru)
  540. count[!!page_is_file_cache(page)]++;
  541. mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, count[0]);
  542. mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, count[1]);
  543. }
  544. /* Similar to reclaim, but different enough that they don't share logic */
  545. static bool too_many_isolated(struct zone *zone)
  546. {
  547. unsigned long active, inactive, isolated;
  548. inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
  549. node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
  550. active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
  551. node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
  552. isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
  553. node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
  554. return isolated > (inactive + active) / 2;
  555. }
  556. /**
  557. * isolate_migratepages_block() - isolate all migrate-able pages within
  558. * a single pageblock
  559. * @cc: Compaction control structure.
  560. * @low_pfn: The first PFN to isolate
  561. * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
  562. * @isolate_mode: Isolation mode to be used.
  563. *
  564. * Isolate all pages that can be migrated from the range specified by
  565. * [low_pfn, end_pfn). The range is expected to be within same pageblock.
  566. * Returns zero if there is a fatal signal pending, otherwise the PFN of the
  567. * first page that was not scanned (which may be less than, equal to, or
  568. * greater than end_pfn).
  569. *
  570. * The pages are isolated on cc->migratepages list (not required to be empty),
  571. * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
  572. * is neither read nor updated.
  573. */
  574. static unsigned long
  575. isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
  576. unsigned long end_pfn, isolate_mode_t isolate_mode)
  577. {
  578. struct zone *zone = cc->zone;
  579. unsigned long nr_scanned = 0, nr_isolated = 0;
  580. struct lruvec *lruvec;
  581. unsigned long flags = 0;
  582. bool locked = false;
  583. struct page *page = NULL, *valid_page = NULL;
  584. unsigned long start_pfn = low_pfn;
  585. bool skip_on_failure = false;
  586. unsigned long next_skip_pfn = 0;
  587. /*
  588. * Ensure that there are not too many pages isolated from the LRU
  589. * list by either parallel reclaimers or compaction. If there are,
  590. * delay for some time until fewer pages are isolated
  591. */
  592. while (unlikely(too_many_isolated(zone))) {
  593. /* async migration should just abort */
  594. if (cc->mode == MIGRATE_ASYNC)
  595. return 0;
  596. congestion_wait(BLK_RW_ASYNC, HZ/10);
  597. if (fatal_signal_pending(current))
  598. return 0;
  599. }
  600. if (compact_should_abort(cc))
  601. return 0;
  602. if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
  603. skip_on_failure = true;
  604. next_skip_pfn = block_end_pfn(low_pfn, cc->order);
  605. }
  606. /* Time to isolate some pages for migration */
  607. for (; low_pfn < end_pfn; low_pfn++) {
  608. if (skip_on_failure && low_pfn >= next_skip_pfn) {
  609. /*
  610. * We have isolated all migration candidates in the
  611. * previous order-aligned block, and did not skip it due
  612. * to failure. We should migrate the pages now and
  613. * hopefully succeed compaction.
  614. */
  615. if (nr_isolated)
  616. break;
  617. /*
  618. * We failed to isolate in the previous order-aligned
  619. * block. Set the new boundary to the end of the
  620. * current block. Note we can't simply increase
  621. * next_skip_pfn by 1 << order, as low_pfn might have
  622. * been incremented by a higher number due to skipping
  623. * a compound or a high-order buddy page in the
  624. * previous loop iteration.
  625. */
  626. next_skip_pfn = block_end_pfn(low_pfn, cc->order);
  627. }
  628. /*
  629. * Periodically drop the lock (if held) regardless of its
  630. * contention, to give chance to IRQs. Abort async compaction
  631. * if contended.
  632. */
  633. if (!(low_pfn % SWAP_CLUSTER_MAX)
  634. && compact_unlock_should_abort(zone_lru_lock(zone), flags,
  635. &locked, cc))
  636. break;
  637. if (!pfn_valid_within(low_pfn))
  638. goto isolate_fail;
  639. nr_scanned++;
  640. page = pfn_to_page(low_pfn);
  641. if (!valid_page)
  642. valid_page = page;
  643. /*
  644. * Skip if free. We read page order here without zone lock
  645. * which is generally unsafe, but the race window is small and
  646. * the worst thing that can happen is that we skip some
  647. * potential isolation targets.
  648. */
  649. if (PageBuddy(page)) {
  650. unsigned long freepage_order = page_order_unsafe(page);
  651. /*
  652. * Without lock, we cannot be sure that what we got is
  653. * a valid page order. Consider only values in the
  654. * valid order range to prevent low_pfn overflow.
  655. */
  656. if (freepage_order > 0 && freepage_order < MAX_ORDER)
  657. low_pfn += (1UL << freepage_order) - 1;
  658. continue;
  659. }
  660. /*
  661. * Regardless of being on LRU, compound pages such as THP and
  662. * hugetlbfs are not to be compacted. We can potentially save
  663. * a lot of iterations if we skip them at once. The check is
  664. * racy, but we can consider only valid values and the only
  665. * danger is skipping too much.
  666. */
  667. if (PageCompound(page)) {
  668. unsigned int comp_order = compound_order(page);
  669. if (likely(comp_order < MAX_ORDER))
  670. low_pfn += (1UL << comp_order) - 1;
  671. goto isolate_fail;
  672. }
  673. /*
  674. * Check may be lockless but that's ok as we recheck later.
  675. * It's possible to migrate LRU and non-lru movable pages.
  676. * Skip any other type of page
  677. */
  678. if (!PageLRU(page)) {
  679. /*
  680. * __PageMovable can return false positive so we need
  681. * to verify it under page_lock.
  682. */
  683. if (unlikely(__PageMovable(page)) &&
  684. !PageIsolated(page)) {
  685. if (locked) {
  686. spin_unlock_irqrestore(zone_lru_lock(zone),
  687. flags);
  688. locked = false;
  689. }
  690. if (isolate_movable_page(page, isolate_mode))
  691. goto isolate_success;
  692. }
  693. goto isolate_fail;
  694. }
  695. /*
  696. * Migration will fail if an anonymous page is pinned in memory,
  697. * so avoid taking lru_lock and isolating it unnecessarily in an
  698. * admittedly racy check.
  699. */
  700. if (!page_mapping(page) &&
  701. page_count(page) > page_mapcount(page))
  702. goto isolate_fail;
  703. /* If we already hold the lock, we can skip some rechecking */
  704. if (!locked) {
  705. locked = compact_trylock_irqsave(zone_lru_lock(zone),
  706. &flags, cc);
  707. if (!locked)
  708. break;
  709. /* Recheck PageLRU and PageCompound under lock */
  710. if (!PageLRU(page))
  711. goto isolate_fail;
  712. /*
  713. * The page became compound since the non-locked check,
  714. * and it's on the LRU. It can only be a THP, so the order
  715. * is safe to read and it's 0 for tail pages.
  716. */
  717. if (unlikely(PageCompound(page))) {
  718. low_pfn += (1UL << compound_order(page)) - 1;
  719. goto isolate_fail;
  720. }
  721. }
  722. lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
  723. /* Try isolate the page */
  724. if (__isolate_lru_page(page, isolate_mode) != 0)
  725. goto isolate_fail;
  726. VM_BUG_ON_PAGE(PageCompound(page), page);
  727. /* Successfully isolated */
  728. del_page_from_lru_list(page, lruvec, page_lru(page));
  729. isolate_success:
  730. list_add(&page->lru, &cc->migratepages);
  731. cc->nr_migratepages++;
  732. nr_isolated++;
  733. /*
  734. * Record where we could have freed pages by migration and not
  735. * yet flushed them to the buddy allocator.
  736. * - this is the lowest page that was isolated and will likely
  737. * then be freed by migration.
  738. */
  739. if (!cc->last_migrated_pfn)
  740. cc->last_migrated_pfn = low_pfn;
  741. /* Avoid isolating too much */
  742. if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
  743. ++low_pfn;
  744. break;
  745. }
  746. continue;
  747. isolate_fail:
  748. if (!skip_on_failure)
  749. continue;
  750. /*
  751. * We have isolated some pages, but then failed. Release them
  752. * instead of migrating, as we cannot form the cc->order buddy
  753. * page anyway.
  754. */
  755. if (nr_isolated) {
  756. if (locked) {
  757. spin_unlock_irqrestore(zone_lru_lock(zone), flags);
  758. locked = false;
  759. }
  760. acct_isolated(zone, cc);
  761. putback_movable_pages(&cc->migratepages);
  762. cc->nr_migratepages = 0;
  763. cc->last_migrated_pfn = 0;
  764. nr_isolated = 0;
  765. }
  766. if (low_pfn < next_skip_pfn) {
  767. low_pfn = next_skip_pfn - 1;
  768. /*
  769. * The check near the loop beginning would have updated
  770. * next_skip_pfn too, but this is a bit simpler.
  771. */
  772. next_skip_pfn += 1UL << cc->order;
  773. }
  774. }
  775. /*
  776. * The PageBuddy() check could have potentially brought us outside
  777. * the range to be scanned.
  778. */
  779. if (unlikely(low_pfn > end_pfn))
  780. low_pfn = end_pfn;
  781. if (locked)
  782. spin_unlock_irqrestore(zone_lru_lock(zone), flags);
  783. /*
  784. * Update the pageblock-skip information and cached scanner pfn,
  785. * if the whole pageblock was scanned without isolating any page.
  786. */
  787. if (low_pfn == end_pfn)
  788. update_pageblock_skip(cc, valid_page, nr_isolated, true);
  789. trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
  790. nr_scanned, nr_isolated);
  791. count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
  792. if (nr_isolated)
  793. count_compact_events(COMPACTISOLATED, nr_isolated);
  794. return low_pfn;
  795. }
  796. /**
  797. * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
  798. * @cc: Compaction control structure.
  799. * @start_pfn: The first PFN to start isolating.
  800. * @end_pfn: The one-past-last PFN.
  801. *
  802. * Returns zero if isolation fails fatally due to e.g. pending signal.
  803. * Otherwise, the function returns the one-past-the-last PFN of the isolated page
  804. * (which may be greater than end_pfn if the end fell in the middle of a THP page).
  805. */
  806. unsigned long
  807. isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
  808. unsigned long end_pfn)
  809. {
  810. unsigned long pfn, block_start_pfn, block_end_pfn;
  811. /* Scan block by block. First and last block may be incomplete */
  812. pfn = start_pfn;
  813. block_start_pfn = pageblock_start_pfn(pfn);
  814. if (block_start_pfn < cc->zone->zone_start_pfn)
  815. block_start_pfn = cc->zone->zone_start_pfn;
  816. block_end_pfn = pageblock_end_pfn(pfn);
  817. for (; pfn < end_pfn; pfn = block_end_pfn,
  818. block_start_pfn = block_end_pfn,
  819. block_end_pfn += pageblock_nr_pages) {
  820. block_end_pfn = min(block_end_pfn, end_pfn);
  821. if (!pageblock_pfn_to_page(block_start_pfn,
  822. block_end_pfn, cc->zone))
  823. continue;
  824. pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
  825. ISOLATE_UNEVICTABLE);
  826. if (!pfn)
  827. break;
  828. if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
  829. break;
  830. }
  831. acct_isolated(cc->zone, cc);
  832. return pfn;
  833. }
  834. #endif /* CONFIG_COMPACTION || CONFIG_CMA */
  835. #ifdef CONFIG_COMPACTION
  836. /* Returns true if the page is within a block suitable for migration to */
  837. static bool suitable_migration_target(struct compact_control *cc,
  838. struct page *page)
  839. {
  840. if (cc->ignore_block_suitable)
  841. return true;
  842. /* If the page is a large free page, then disallow migration */
  843. if (PageBuddy(page)) {
  844. /*
  845. * We are checking page_order without zone->lock taken. But
  846. * the only small danger is that we skip a potentially suitable
  847. * pageblock, so it's not worth checking the order against a valid range.
  848. */
  849. if (page_order_unsafe(page) >= pageblock_order)
  850. return false;
  851. }
  852. /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
  853. if (migrate_async_suitable(get_pageblock_migratetype(page)))
  854. return true;
  855. /* Otherwise skip the block */
  856. return false;
  857. }
  858. /*
  859. * Test whether the free scanner has reached the same or lower pageblock than
  860. * the migration scanner, and compaction should thus terminate.
  861. */
  862. static inline bool compact_scanners_met(struct compact_control *cc)
  863. {
  864. return (cc->free_pfn >> pageblock_order)
  865. <= (cc->migrate_pfn >> pageblock_order);
  866. }
  867. /*
  868. * Based on information in the current compact_control, find blocks
  869. * suitable for isolating free pages from and then isolate them.
  870. */
  871. static void isolate_freepages(struct compact_control *cc)
  872. {
  873. struct zone *zone = cc->zone;
  874. struct page *page;
  875. unsigned long block_start_pfn; /* start of current pageblock */
  876. unsigned long isolate_start_pfn; /* exact pfn we start at */
  877. unsigned long block_end_pfn; /* end of current pageblock */
  878. unsigned long low_pfn; /* lowest pfn scanner is able to scan */
  879. struct list_head *freelist = &cc->freepages;
  880. /*
  881. * Initialise the free scanner. The starting point is where we last
  882. * successfully isolated from, zone-cached value, or the end of the
  883. * zone when isolating for the first time. For looping we also need
  884. * this pfn aligned down to the pageblock boundary, because we do
  885. * block_start_pfn -= pageblock_nr_pages in the for loop.
  886. * For the ending point, take care when isolating in the last pageblock of
  887. * a zone which ends in the middle of a pageblock.
  888. * The low boundary is the end of the pageblock the migration scanner
  889. * is using.
  890. */
  891. isolate_start_pfn = cc->free_pfn;
  892. block_start_pfn = pageblock_start_pfn(cc->free_pfn);
  893. block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
  894. zone_end_pfn(zone));
  895. low_pfn = pageblock_end_pfn(cc->migrate_pfn);
  896. /*
  897. * Isolate free pages until enough are available to migrate the
  898. * pages on cc->migratepages. We stop searching if the migrate
  899. * and free page scanners meet or enough free pages are isolated.
  900. */
  901. for (; block_start_pfn >= low_pfn;
  902. block_end_pfn = block_start_pfn,
  903. block_start_pfn -= pageblock_nr_pages,
  904. isolate_start_pfn = block_start_pfn) {
  905. /*
  906. * This can iterate a massively long zone without finding any
  907. * suitable migration targets, so periodically check if we need
  908. * to schedule, or even abort async compaction.
  909. */
  910. if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
  911. && compact_should_abort(cc))
  912. break;
  913. page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
  914. zone);
  915. if (!page)
  916. continue;
  917. /* Check the block is suitable for migration */
  918. if (!suitable_migration_target(cc, page))
  919. continue;
  920. /* If isolation recently failed, do not retry */
  921. if (!isolation_suitable(cc, page))
  922. continue;
  923. /* Found a block suitable for isolating free pages from. */
  924. isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
  925. freelist, false);
  926. /*
  927. * If we isolated enough freepages, or aborted due to lock
  928. * contention, terminate.
  929. */
  930. if ((cc->nr_freepages >= cc->nr_migratepages)
  931. || cc->contended) {
  932. if (isolate_start_pfn >= block_end_pfn) {
  933. /*
  934. * Restart at previous pageblock if more
  935. * freepages can be isolated next time.
  936. */
  937. isolate_start_pfn =
  938. block_start_pfn - pageblock_nr_pages;
  939. }
  940. break;
  941. } else if (isolate_start_pfn < block_end_pfn) {
  942. /*
  943. * If isolation failed early, do not continue
  944. * needlessly.
  945. */
  946. break;
  947. }
  948. }
  949. /* __isolate_free_page() does not map the pages */
  950. map_pages(freelist);
  951. /*
  952. * Record where the free scanner will restart next time. Either we
  953. * broke from the loop and set isolate_start_pfn based on the last
  954. * call to isolate_freepages_block(), or we met the migration scanner
  955. * and the loop terminated due to isolate_start_pfn < low_pfn
  956. */
  957. cc->free_pfn = isolate_start_pfn;
  958. }
  959. /*
  960. * This is a migrate-callback that "allocates" freepages by taking pages
  961. * from the isolated freelists in the block we are migrating to.
  962. */
  963. static struct page *compaction_alloc(struct page *migratepage,
  964. unsigned long data,
  965. int **result)
  966. {
  967. struct compact_control *cc = (struct compact_control *)data;
  968. struct page *freepage;
  969. /*
  970. * Isolate free pages if necessary, and if we are not aborting due to
  971. * contention.
  972. */
  973. if (list_empty(&cc->freepages)) {
  974. if (!cc->contended)
  975. isolate_freepages(cc);
  976. if (list_empty(&cc->freepages))
  977. return NULL;
  978. }
  979. freepage = list_entry(cc->freepages.next, struct page, lru);
  980. list_del(&freepage->lru);
  981. cc->nr_freepages--;
  982. return freepage;
  983. }
  984. /*
  985. * This is a migrate-callback that "frees" freepages back to the isolated
  986. * freelist. All pages on the freelist are from the same zone, so there is no
  987. * special handling needed for NUMA.
  988. */
  989. static void compaction_free(struct page *page, unsigned long data)
  990. {
  991. struct compact_control *cc = (struct compact_control *)data;
  992. list_add(&page->lru, &cc->freepages);
  993. cc->nr_freepages++;
  994. }
  995. /* possible outcome of isolate_migratepages */
  996. typedef enum {
  997. ISOLATE_ABORT, /* Abort compaction now */
  998. ISOLATE_NONE, /* No pages isolated, continue scanning */
  999. ISOLATE_SUCCESS, /* Pages isolated, migrate */
  1000. } isolate_migrate_t;
  1001. /*
  1002. * Allow userspace to control policy on scanning the unevictable LRU for
  1003. * compactable pages.
  1004. */
  1005. int sysctl_compact_unevictable_allowed __read_mostly = 1;
  1006. /*
  1007. * Isolate all pages that can be migrated from the first suitable block,
  1008. * starting at the block pointed to by the migrate scanner pfn within
  1009. * compact_control.
  1010. */
  1011. static isolate_migrate_t isolate_migratepages(struct zone *zone,
  1012. struct compact_control *cc)
  1013. {
  1014. unsigned long block_start_pfn;
  1015. unsigned long block_end_pfn;
  1016. unsigned long low_pfn;
  1017. struct page *page;
  1018. const isolate_mode_t isolate_mode =
  1019. (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
  1020. (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
  1021. /*
  1022. * Start at where we last stopped, or beginning of the zone as
  1023. * initialized by compact_zone()
  1024. */
  1025. low_pfn = cc->migrate_pfn;
  1026. block_start_pfn = pageblock_start_pfn(low_pfn);
  1027. if (block_start_pfn < zone->zone_start_pfn)
  1028. block_start_pfn = zone->zone_start_pfn;
  1029. /* Only scan within a pageblock boundary */
  1030. block_end_pfn = pageblock_end_pfn(low_pfn);
  1031. /*
  1032. * Iterate over whole pageblocks until we find the first suitable.
  1033. * Do not cross the free scanner.
  1034. */
  1035. for (; block_end_pfn <= cc->free_pfn;
  1036. low_pfn = block_end_pfn,
  1037. block_start_pfn = block_end_pfn,
  1038. block_end_pfn += pageblock_nr_pages) {
  1039. /*
  1040. * This can potentially iterate a massively long zone with
  1041. * many pageblocks unsuitable, so periodically check if we
  1042. * need to schedule, or even abort async compaction.
  1043. */
  1044. if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
  1045. && compact_should_abort(cc))
  1046. break;
  1047. page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
  1048. zone);
  1049. if (!page)
  1050. continue;
  1051. /* If isolation recently failed, do not retry */
  1052. if (!isolation_suitable(cc, page))
  1053. continue;
  1054. /*
  1055. * For async compaction, also only scan in MOVABLE blocks.
  1056. * Async compaction is optimistic and checks whether the minimum
  1057. * amount of work satisfies the allocation.
  1058. */
  1059. if (cc->mode == MIGRATE_ASYNC &&
  1060. !migrate_async_suitable(get_pageblock_migratetype(page)))
  1061. continue;
  1062. /* Perform the isolation */
  1063. low_pfn = isolate_migratepages_block(cc, low_pfn,
  1064. block_end_pfn, isolate_mode);
  1065. if (!low_pfn || cc->contended) {
  1066. acct_isolated(zone, cc);
  1067. return ISOLATE_ABORT;
  1068. }
  1069. /*
  1070. * Either we isolated something and proceed with migration. Or
  1071. * we failed and compact_zone should decide if we should
  1072. * continue or not.
  1073. */
  1074. break;
  1075. }
  1076. acct_isolated(zone, cc);
  1077. /* Record where migration scanner will be restarted. */
  1078. cc->migrate_pfn = low_pfn;
  1079. return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
  1080. }
  1081. /*
  1082. * order == -1 is expected when compacting via
  1083. * /proc/sys/vm/compact_memory
  1084. */
  1085. static inline bool is_via_compact_memory(int order)
  1086. {
  1087. return order == -1;
  1088. }
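/*
 * Core termination check for compact_zone(): abort on contention or a fatal
 * signal, complete when the scanners meet, and for a targeted (order > 0)
 * request succeed as soon as a free page of the right order and migratetype
 * (or one that can be stolen from another migratetype) is available.
 */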
  1089. static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
  1090. const int migratetype)
  1091. {
  1092. unsigned int order;
  1093. unsigned long watermark;
  1094. if (cc->contended || fatal_signal_pending(current))
  1095. return COMPACT_CONTENDED;
  1096. /* Compaction run completes if the migrate and free scanner meet */
  1097. if (compact_scanners_met(cc)) {
  1098. /* Let the next compaction start anew. */
  1099. reset_cached_positions(zone);
  1100. /*
  1101. * Mark that the PG_migrate_skip information should be cleared
  1102. * by kswapd when it goes to sleep. kcompactd does not set the
  1103. * flag itself as the decision to be clear should be directly
  1104. * based on an allocation request.
  1105. */
  1106. if (cc->direct_compaction)
  1107. zone->compact_blockskip_flush = true;
  1108. if (cc->whole_zone)
  1109. return COMPACT_COMPLETE;
  1110. else
  1111. return COMPACT_PARTIAL_SKIPPED;
  1112. }
  1113. if (is_via_compact_memory(cc->order))
  1114. return COMPACT_CONTINUE;
  1115. /* Compaction run is not finished if the watermark is not met */
  1116. watermark = zone->watermark[cc->alloc_flags & ALLOC_WMARK_MASK];
  1117. if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
  1118. cc->alloc_flags))
  1119. return COMPACT_CONTINUE;
  1120. /* Direct compactor: Is a suitable page free? */
  1121. for (order = cc->order; order < MAX_ORDER; order++) {
  1122. struct free_area *area = &zone->free_area[order];
  1123. bool can_steal;
  1124. /* Job done if page is free of the right migratetype */
  1125. if (!list_empty(&area->free_list[migratetype]))
  1126. return COMPACT_SUCCESS;
  1127. #ifdef CONFIG_CMA
  1128. /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
  1129. if (migratetype == MIGRATE_MOVABLE &&
  1130. !list_empty(&area->free_list[MIGRATE_CMA]))
  1131. return COMPACT_SUCCESS;
  1132. #endif
  1133. /*
  1134. * Job done if allocation would steal freepages from
  1135. * other migratetype buddy lists.
  1136. */
  1137. if (find_suitable_fallback(area, order, migratetype,
  1138. true, &can_steal) != -1)
  1139. return COMPACT_SUCCESS;
  1140. }
  1141. return COMPACT_NO_SUITABLE_PAGE;
  1142. }
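/*
 * Wrapper around __compact_finished() that records the detailed result in a
 * tracepoint before folding COMPACT_NO_SUITABLE_PAGE back into
 * COMPACT_CONTINUE for the caller.
 */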
  1143. static enum compact_result compact_finished(struct zone *zone,
  1144. struct compact_control *cc,
  1145. const int migratetype)
  1146. {
  1147. int ret;
  1148. ret = __compact_finished(zone, cc, migratetype);
  1149. trace_mm_compaction_finished(zone, cc->order, ret);
  1150. if (ret == COMPACT_NO_SUITABLE_PAGE)
  1151. ret = COMPACT_CONTINUE;
  1152. return ret;
  1153. }
  1154. /*
  1155. * compaction_suitable: Is this suitable to run compaction on this zone now?
  1156. * Returns
  1157. * COMPACT_SKIPPED - If there are too few free pages for compaction
  1158. * COMPACT_SUCCESS - If the allocation would succeed without compaction
  1159. * COMPACT_CONTINUE - If compaction should run now
  1160. */
  1161. static enum compact_result __compaction_suitable(struct zone *zone, int order,
  1162. unsigned int alloc_flags,
  1163. int classzone_idx,
  1164. unsigned long wmark_target)
  1165. {
  1166. unsigned long watermark;
  1167. if (is_via_compact_memory(order))
  1168. return COMPACT_CONTINUE;
  1169. watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
  1170. /*
  1171. * If watermarks for high-order allocation are already met, there
  1172. * should be no need for compaction at all.
  1173. */
  1174. if (zone_watermark_ok(zone, order, watermark, classzone_idx,
  1175. alloc_flags))
  1176. return COMPACT_SUCCESS;
  1177. /*
  1178. * Watermarks for order-0 must be met for compaction to be able to
  1179. * isolate free pages for migration targets. This means that the
  1180. * watermark and alloc_flags have to match, or be more pessimistic than
  1181. * the check in __isolate_free_page(). We don't use the direct
  1182. * compactor's alloc_flags, as they are not relevant for freepage
  1183. * isolation. We however do use the direct compactor's classzone_idx to
  1184. * skip over zones where lowmem reserves would prevent allocation even
  1185. * if compaction succeeds.
  1186. * For costly orders, we require low watermark instead of min for
  1187. * compaction to proceed to increase its chances.
  1188. * ALLOC_CMA is used, as pages in CMA pageblocks are considered
  1189. * suitable migration targets
  1190. */
  1191. watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
  1192. low_wmark_pages(zone) : min_wmark_pages(zone);
  1193. watermark += compact_gap(order);
  1194. if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
  1195. ALLOC_CMA, wmark_target))
  1196. return COMPACT_SKIPPED;
  1197. return COMPACT_CONTINUE;
  1198. }
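/*
 * In addition to the watermark checks in __compaction_suitable(), consult
 * the fragmentation index for costly orders so that compaction is only
 * attempted when allocation failures look like fragmentation rather than a
 * plain lack of free memory.
 */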
  1199. enum compact_result compaction_suitable(struct zone *zone, int order,
  1200. unsigned int alloc_flags,
  1201. int classzone_idx)
  1202. {
  1203. enum compact_result ret;
  1204. int fragindex;
  1205. ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
  1206. zone_page_state(zone, NR_FREE_PAGES));
  1207. /*
  1208. * fragmentation index determines if allocation failures are due to
  1209. * low memory or external fragmentation
  1210. *
  1211. * index of -1000 would imply allocations might succeed depending on
  1212. * watermarks, but we already failed the high-order watermark check
  1213. * index towards 0 implies failure is due to lack of memory
  1214. * index towards 1000 implies failure is due to fragmentation
  1215. *
  1216. * Only compact if a failure would be due to fragmentation. Also
  1217. * ignore fragindex for non-costly orders where the alternative to
  1218. * a successful reclaim/compaction is OOM. Fragindex and the
  1219. * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
  1220. * excessive compaction for costly orders, but it should not be at the
  1221. * expense of system stability.
  1222. */
  1223. if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
  1224. fragindex = fragmentation_index(zone, order);
  1225. if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
  1226. ret = COMPACT_NOT_SUITABLE_ZONE;
  1227. }
  1228. trace_mm_compaction_suitable(zone, order, ret);
  1229. if (ret == COMPACT_NOT_SUITABLE_ZONE)
  1230. ret = COMPACT_SKIPPED;
  1231. return ret;
  1232. }
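/*
 * Check whether at least one zone in the allocation context could pass
 * __compaction_suitable() once a fraction of its reclaimable pages is
 * counted as free, i.e. whether another round of reclaim followed by
 * compaction can still make progress for this allocation.
 */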
  1233. bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
  1234. int alloc_flags)
  1235. {
  1236. struct zone *zone;
  1237. struct zoneref *z;
  1238. /*
  1239. * Make sure at least one zone would pass __compaction_suitable if we continue
  1240. * retrying the reclaim.
  1241. */
  1242. for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
  1243. ac->nodemask) {
  1244. unsigned long available;
  1245. enum compact_result compact_result;
  1246. /*
  1247. * Do not consider all the reclaimable memory because we do not
  1248. * want to thrash just for a single high-order allocation which
  1249. * is not even guaranteed to appear even if __compaction_suitable
  1250. * is happy about the watermark check.
  1251. */
  1252. available = zone_reclaimable_pages(zone) / order;
  1253. available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
  1254. compact_result = __compaction_suitable(zone, order, alloc_flags,
  1255. ac_classzone_idx(ac), available);
  1256. if (compact_result != COMPACT_SKIPPED)
  1257. return true;
  1258. }
  1259. return false;
  1260. }
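/*
 * Run one compaction pass over a zone: initialise the migrate and free
 * scanner positions (from the cached values unless the whole zone is to be
 * compacted), then repeatedly isolate migratable pages and migrate them to
 * free pages isolated from the end of the zone, until compact_finished()
 * reports a result.
 */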

static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
{
	enum compact_result ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
				  cc->classzone_idx);
	/* Compaction is likely to fail */
	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
		return ret;

	/* huh, compaction_suitable is returning something unexpected */
	VM_BUG_ON(ret != COMPACT_CONTINUE);

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred.
	 */
	if (compaction_restarting(zone, cc->order))
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start (unless we explicitly
	 * want to compact the whole zone), but check that it is initialised
	 * by ensuring the values are within zone boundaries.
	 */
	if (cc->whole_zone) {
		cc->migrate_pfn = start_pfn;
		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
	} else {
		cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
		cc->free_pfn = zone->compact_cached_free_pfn;
		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
			zone->compact_cached_free_pfn = cc->free_pfn;
		}
		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
			cc->migrate_pfn = start_pfn;
			zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
			zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
		}

		if (cc->migrate_pfn == start_pfn)
			cc->whole_zone = true;
	}

	cc->last_migrated_pfn = 0;

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				  cc->free_pfn, end_pfn, sync);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc, migratetype)) ==
						COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_CONTENDED;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * previous cc->order aligned block.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_CONTENDED;
				goto out;
			}
			/*
			 * We failed to migrate at least one page in the current
			 * order-aligned block, so skip the rest of it.
			 */
			if (cc->direct_compaction &&
						(cc->mode == MIGRATE_ASYNC)) {
				cc->migrate_pfn = block_end_pfn(
						cc->migrate_pfn - 1, cc->order);
				/* Draining pcplists is useless in this case */
				cc->last_migrated_pfn = 0;
			}
		}

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
		if (cc->order > 0 && cc->last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				block_start_pfn(cc->migrate_pfn, cc->order);

			if (cc->last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(zone);
				put_cpu();
				/* No more flushing until we migrate again */
				cc->last_migrated_pfn = 0;
			}
		}
	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn = pageblock_start_pfn(free_pfn);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = free_pfn;
	}

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	return ret;
}
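
/*
 * Outline of compact_zone(): each loop iteration first asks compact_finished()
 * whether a page of cc->order has become available or the two scanners have
 * met; isolate_migratepages() then collects a batch of in-use movable pages at
 * the migration scanner, and migrate_pages() moves them into free pages handed
 * out by compaction_alloc() from the free scanner. Once the migration scanner
 * leaves an order-aligned block, the per-cpu page lists are drained so the
 * freed pages can merge and the next compact_finished() check can see the
 * resulting high-order page.
 */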

static enum compact_result compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum compact_priority prio,
		unsigned int alloc_flags, int classzone_idx)
{
	enum compact_result ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = (prio == COMPACT_PRIO_ASYNC) ?
					MIGRATE_ASYNC :	MIGRATE_SYNC_LIGHT,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
		.direct_compaction = true,
		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	return ret;
}
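
/*
 * Note on compact_zone_order() priorities, as encoded in the initialiser
 * above: COMPACT_PRIO_ASYNC maps to MIGRATE_ASYNC migration, every other
 * priority to MIGRATE_SYNC_LIGHT. Only the lowest priority,
 * MIN_COMPACT_PRIORITY, additionally forces a whole-zone scan and ignores
 * both the pageblock skip hints and the suitable-block heuristic.
 */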

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @prio: Determines how hard direct compaction should try to succeed
 *
 * This is the main entry point for direct page compaction.
 */
enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio)
{
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	enum compact_result rc = COMPACT_SKIPPED;

	/* Check if the GFP flags allow compaction */
	if (!may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
					ac->nodemask) {
		enum compact_result status;

		if (prio > MIN_COMPACT_PRIORITY
					&& compaction_deferred(zone, order)) {
			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
			continue;
		}

		status = compact_zone_order(zone, order, gfp_mask, prio,
					alloc_flags, ac_classzone_idx(ac));
		rc = max(status, rc);

		/* The allocation should succeed, stop compacting */
		if (status == COMPACT_SUCCESS) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);

			break;
		}

		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
					status == COMPACT_PARTIAL_SKIPPED))
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones
		 */
		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
					|| fatal_signal_pending(current))
			break;
	}

	return rc;
}
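
/*
 * Caller sketch (illustrative; the real bookkeeping lives in mm/page_alloc.c):
 * the allocator's slow path is expected to invoke try_to_compact_pages()
 * roughly as
 *
 *	current->flags |= PF_MEMALLOC;
 *	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags,
 *						ac, prio);
 *	current->flags &= ~PF_MEMALLOC;
 *
 * with PF_MEMALLOC set so that allocations made while migrating pages do not
 * recurse back into reclaim/compaction.
 */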

/* Compact all zones within a node */
static void compact_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.whole_zone = true,
	};

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.nr_freepages = 0;
		cc.nr_migratepages = 0;
		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		compact_zone(zone, &cc);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}
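
/*
 * Usage example: any write triggers compaction of every node, e.g.
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */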

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}
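
/*
 * Usage example: the threshold compared against fragmentation_index() in
 * compaction_suitable() can be tuned at runtime, e.g.
 *
 *	sysctl vm.extfrag_threshold=750
 *
 * Raising it makes the kernel more inclined to reclaim and less inclined to
 * compact when a costly high-order allocation fails.
 */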

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
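
/*
 * Usage example: with CONFIG_SYSFS and CONFIG_NUMA, a single node can be
 * compacted through its node device, e.g.
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */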

static inline bool kcompactd_work_requested(pg_data_t *pgdat)
{
	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
}

static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;

	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
		zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
					classzone_idx) == COMPACT_CONTINUE)
			return true;
	}

	return false;
}

static void kcompactd_do_work(pg_data_t *pgdat)
{
	/*
	 * With no special task, compact all zones so that a page of requested
	 * order is allocatable.
	 */
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = pgdat->kcompactd_max_order,
		.classzone_idx = pgdat->kcompactd_classzone_idx,
		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = true,
	};
	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
							cc.classzone_idx);
	count_vm_event(KCOMPACTD_WAKE);

	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
		int status;

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		if (compaction_deferred(zone, cc.order))
			continue;

		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
							COMPACT_CONTINUE)
			continue;

		cc.nr_freepages = 0;
		cc.nr_migratepages = 0;
		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		if (kthread_should_stop())
			return;
		status = compact_zone(zone, &cc);

		if (status == COMPACT_SUCCESS) {
			compaction_defer_reset(zone, cc.order, false);
		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
			/*
			 * We use sync migration mode here, so we defer like
			 * sync direct compaction does.
			 */
			defer_compaction(zone, cc.order);
		}

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	/*
	 * Regardless of success, we are done until woken up next. But remember
	 * the requested order/classzone_idx in case it was higher/tighter than
	 * our current ones
	 */
	if (pgdat->kcompactd_max_order <= cc.order)
		pgdat->kcompactd_max_order = 0;
	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
}

void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
	if (!order)
		return;

	if (pgdat->kcompactd_max_order < order)
		pgdat->kcompactd_max_order = order;

	if (pgdat->kcompactd_classzone_idx > classzone_idx)
		pgdat->kcompactd_classzone_idx = classzone_idx;

	if (!waitqueue_active(&pgdat->kcompactd_wait))
		return;

	if (!kcompactd_node_suitable(pgdat))
		return;

	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
							classzone_idx);
	wake_up_interruptible(&pgdat->kcompactd_wait);
}
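
/*
 * Worked example for the bookkeeping in wakeup_kcompactd(): if one wakeup
 * (e.g. from the reclaim path) requests order 3 with classzone_idx
 * ZONE_NORMAL and a later one, before kcompactd runs, requests order 9 with
 * classzone_idx ZONE_DMA32, then kcompactd_max_order ends up as 9 (the
 * highest requested order wins) and kcompactd_classzone_idx as ZONE_DMA32
 * (the most restrictive, i.e. lowest, zone index wins).
 */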

/*
 * The background compaction daemon, started as a kernel thread
 * from the init process.
 */
static int kcompactd(void *p)
{
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;

	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	set_freezable();

	pgdat->kcompactd_max_order = 0;
	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;

	while (!kthread_should_stop()) {
		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
		wait_event_freezable(pgdat->kcompactd_wait,
				kcompactd_work_requested(pgdat));

		kcompactd_do_work(pgdat);
	}

	return 0;
}

/*
 * This kcompactd start function will be called by init and node-hot-add.
 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kcompactd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kcompactd)
		return 0;

	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
	if (IS_ERR(pgdat->kcompactd)) {
		pr_err("Failed to start kcompactd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kcompactd);
		pgdat->kcompactd = NULL;
	}
	return ret;
}
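
/*
 * Example: given the "kcompactd%d" format above, a two-node machine gets
 * kernel threads named kcompactd0 and kcompactd1, visible in ps/top output.
 */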

/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold mem_hotplug_begin/end().
 */
void kcompactd_stop(int nid)
{
	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;

	if (kcompactd) {
		kthread_stop(kcompactd);
		NODE_DATA(nid)->kcompactd = NULL;
	}
}

/*
 * It's optimal to keep kcompactd threads on the same CPUs as their node's
 * memory, but that is not required for correctness. So if the last cpu in a
 * node goes away, we get changed to run anywhere; when the first one comes
 * back, we restore the cpu bindings.
 */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kcompactd, mask);
		}
	}
	return NOTIFY_OK;
}

static int __init kcompactd_init(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY)
		kcompactd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}
subsys_initcall(kcompactd_init)

#endif /* CONFIG_COMPACTION */