/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

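/* Give isolated free pages back to the buddy allocator, returning how many were released. */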
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

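/*
 * Finish page "allocation" for isolated free pages: split_free_page() does
 * not call the arch hook or map the pages, so do it here.
 */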
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

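/* Only MIGRATE_MOVABLE and MIGRATE_CMA pageblocks are worth touching during async compaction. */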
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn[0] = start_pfn;
	zone->compact_cached_migrate_pfn[1] = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool set_unsuitable, bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	/*
	 * Only skip pageblocks when all forms of compaction will be known to
	 * fail in the near future.
	 */
	if (set_unsuitable)
		set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (cc->finished_update_migrate)
			return;
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (cc->finished_update_free)
			return;
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool set_unsuitable, bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

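/* True if the caller should drop this coarse lock: a reschedule is due or the lock is contended. */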
static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if contention
 * is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_checklock_irqsave() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}

		cond_resched();
	}

	return false;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return false;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, it will
 * abort, returning 0 on any invalid PFNs or non-free pages inside the
 * pageblock (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags;
	bool locked = false;
	bool checked_pageblock = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction do not
		 * spin on the lock and we acquire the lock as late as
		 * possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !checked_pageblock) {
			/*
			 * We need to check suitability of the pageblock only
			 * once and this isolate_freepages_block() is called
			 * with a pageblock range, so checking it once is
			 * sufficient.
			 */
			checked_pageblock = true;
			if (!suitable_migration_target(page))
				break;
		}

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			goto isolate_fail;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, true,
				      false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	 Zone pages are in.
 * @cc:		 Compaction control structure.
 * @low_pfn:	 The first PFN of the range.
 * @end_pfn:	 The one-past-the-last PFN of the range.
 * @unevictable: true if it is allowed to isolate unevictable pages
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	bool set_unsuitable = true;
	const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
					ISOLATE_ASYNC_MIGRATE : 0) |
				    (unevictable ? ISOLATE_UNEVICTABLE : 0);

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (last_pageblock_nr != pageblock_nr) {
			int mt;

			last_pageblock_nr = pageblock_nr;
			if (!isolation_suitable(cc, page))
				goto next_pageblock;

			/*
			 * For async migration, also only scan in MOVABLE
			 * blocks. Async migration is optimistic to see if
			 * the minimum amount of work satisfies the allocation
			 */
			mt = get_pageblock_migratetype(page);
			if (cc->mode == MIGRATE_ASYNC &&
			    !migrate_async_suitable(mt)) {
				set_unsuitable = false;
				goto next_pageblock;
			}
		}

		/*
		 * Skip if free. page_order cannot be used without zone->lock
		 * as nothing prevents parallel allocations or buddy merging.
		 */
		if (PageBuddy(page))
			continue;

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages;
		 * skip any other type of page.
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (locked && balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		cc->finished_update_migrate = true;
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated,
				      set_unsuitable, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	/* lowest pfn scanner is able to scan */
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. We need this aligned to
	 * the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For the ending point, take care when isolating in the last
	 * pageblock of a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		if (!pfn_valid(block_start_pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(block_start_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		cc->free_pfn = block_start_pfn;
		isolated = isolate_freepages_block(cc, block_start_pfn,
					block_end_pfn, freelist, false);
		nr_freepages += isolated;

		/*
		 * Set a flag that we successfully isolated in this pageblock.
		 * In the next loop iteration, zone->compact_cached_free_pfn
		 * will not be updated and thus it will effectively contain the
		 * highest pageblock we isolated pages from.
		 */
		if (isolated)
			cc->finished_update_free = true;

		/*
		 * isolate_freepages_block() might have aborted due to async
		 * compaction being contended
		 */
		if (cc->contended)
			break;
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (block_start_pfn < low_pfn)
		cc->free_pfn = cc->migrate_pfn;

	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

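/*
 * Check whether this compaction run is finished: it aborts on contention or
 * a fatal signal, completes once the migrate and free scanners meet, and is
 * partial as soon as a free page of the requested order is available.
 */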
static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear it should be based
		 * directly on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

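/*
 * Compact a single zone: run the migrate and free scanners towards each
 * other, migrating movable pages towards the end of the zone, until
 * compact_finished() says the run is done or an error aborts it.
 */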
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

	migrate_prep_local();

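	/*
	 * Main loop: isolate pages at the migrate scanner and migrate them
	 * into free pages taken from the free scanner, until
	 * compact_finished() signals completion or an error forces an exit.
	 */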
	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		if (!cc->nr_migratepages)
			continue;

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	trace_mm_compaction_end(ret);

	return ret;
}

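/*
 * Compact one zone on behalf of a direct compactor, building a
 * compact_control for the requested order and gfp mask.
 */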
static unsigned long compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.mode = mode,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that is true if compaction was aborted due to lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			enum migrate_mode mode, bool *contended)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_compact_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, mode,
						contended);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}

/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

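/*
 * Compact a whole node with async compaction for the given order;
 * an order-0 request is a no-op.
 */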
void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */