compaction.c

/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
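
/* Return isolated free pages to the buddy allocator, counting them as we go */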
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}
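
/* split_free_page() does not map the pages it produces; do that here before use */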
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}
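
/* Async compaction only deals with MIGRATE_MOVABLE and MIGRATE_CMA pageblocks */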
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanners
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (!nr_isolated) {
		unsigned long pfn = page_to_pfn(page);

		set_pageblock_skip(page);

		/* Update where compaction should restart */
		if (migrate_scanner) {
			if (!cc->finished_update_migrate &&
			    pfn > zone->compact_cached_migrate_pfn)
				zone->compact_cached_migrate_pfn = pfn;
		} else {
			if (!cc->finished_update_free &&
			    pfn < zone->compact_cached_free_pfn)
				zone->compact_cached_free_pfn = pfn;
		}
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out in the event
 * of severe contention. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);

	return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return false;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags;
	bool locked = false;
	bool checked_pageblock = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction, do not
		 * spin on the lock and acquire it as late as possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !checked_pageblock) {
			/*
			 * The suitability of the pageblock needs to be
			 * checked only once, as isolate_freepages_block()
			 * is called on a single pageblock's range.
			 */
			checked_pageblock = true;
			if (!suitable_migration_target(page))
				break;
		}

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			goto isolate_fail;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn: The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * page (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
			zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
			zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
			zone_page_state(zone, NR_ISOLATED_ANON);
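
	/* Deem it "too many" once over half of the LRU pages are isolated */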
	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone: Zone pages are in.
 * @cc: Compaction control structure.
 * @low_pfn: The first PFN of the range.
 * @end_pfn: The one-past-the-last PFN of the range.
 * @unevictable: true if unevictable pages may be isolated as well
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	bool skipped_async_unsuitable = false;
	const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
				    (unevictable ? ISOLATE_UNEVICTABLE : 0);

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (last_pageblock_nr != pageblock_nr) {
			int mt;

			last_pageblock_nr = pageblock_nr;
			if (!isolation_suitable(cc, page))
				goto next_pageblock;

			/*
			 * For async migration, also only scan in MOVABLE
			 * blocks. Async migration is optimistic and checks
			 * whether the minimum amount of work satisfies the
			 * allocation.
			 */
			mt = get_pageblock_migratetype(page);
			if (!cc->sync && !migrate_async_suitable(mt)) {
				cc->finished_update_migrate = true;
				skipped_async_unsuitable = true;
				goto next_pageblock;
			}
		}

		/*
		 * Skip if free. page_order cannot be used without zone->lock
		 * as nothing prevents parallel allocations or buddy merging.
		 */
		if (PageBuddy(page))
			continue;

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages;
		 * skip any other type of page.
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (locked && balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		cc->finished_update_migrate = true;
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
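		/*
		 * Point low_pfn at the last PFN of this pageblock; the
		 * loop increment then advances to the next pageblock.
		 */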
		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 * This is not done when pageblock was skipped due to being unsuitable
	 * for async compaction, so that eventual sync compaction can try.
	 */
	if (low_pfn == end_pfn && !skipped_async_unsuitable)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}
#endif /* CONFIG_COMPACTION || CONFIG_CMA */

#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	z_end_pfn = zone_end_pfn(zone);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule.
		 */
		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = 0;

		/*
		 * As pfn may not start aligned, pfn + pageblock_nr_pages
		 * may cross a MAX_ORDER_NR_PAGES boundary and miss
		 * a pfn_valid check. Ensure isolate_freepages_block()
		 * only scans within a pageblock
		 */
		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		end_pfn = min(end_pfn, z_end_pfn);
		isolated = isolate_freepages_block(cc, pfn, end_pfn,
						   freelist, false);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			cc->finished_update_free = true;
			high_pfn = max(high_pfn, pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (pfn < low_pfn)
		cc->free_pfn = max(pfn, zone->zone_start_pfn);
	else
		cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear it should be based
		 * directly on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start, but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
	}

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
				MR_COMPACTION);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release isolated pages not migrated */
		if (err) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	trace_mm_compaction_end(ret);

	return ret;
}
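
/* Run one compaction pass over a single zone on behalf of a direct compactor */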
static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync, bool *contended)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_compact_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}

/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
			/* Currently async compaction is never deferred. */
			else if (cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;
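
/* Triggered from userspace with, e.g., "echo 1 > /proc/sys/vm/compact_memory" */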
/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
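/* Per-node trigger, e.g. "echo 1 > /sys/devices/system/node/node0/compact" */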
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */