
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/compaction.h>

#include <asm/tlbflush.h>

#include "internal.h"
/*
 * online_page_callback contains a pointer to the current page-onlining
 * function. Initially it is generic_online_page(); if required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);
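/*
 * Example (a minimal sketch, not part of this file): a driver that wants
 * to intercept hot-added pages can override the onlining path.
 * my_online_page() is a hypothetical callback; the Hyper-V balloon driver
 * uses this interface in a similar way.
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		__online_page_increment_counters(page);
 *		__online_page_free(page);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */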
DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

static int __init setup_memhp_default_state(char *str)
{
	if (!strcmp(str, "online"))
		memhp_auto_online = true;
	else if (!strcmp(str, "offline"))
		memhp_auto_online = false;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}
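/*
 * Usage sketch (hypothetical caller, not from this file): writers bracket
 * the whole hotplug operation, while readers that only need a stable view
 * of memory use the get_online_mems()/put_online_mems() pair above:
 *
 *	mem_hotplug_begin();
 *	ret = do_the_hotplug_work();	(hypothetical helper)
 *	mem_hotplug_done();
 */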
/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res, *conflict;

	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	conflict = request_resource_conflict(&iomem_resource, res);
	if (conflict) {
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_debug("Device unaddressable memory block "
				 "memory hotplug at %#010llx !\n",
				 (unsigned long long)start);
		}
		pr_debug("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		return ERR_PTR(-EEXIST);
	}
	return res;
}
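/*
 * On success the new range shows up in /proc/iomem as a busy "System RAM"
 * resource, e.g. (illustrative addresses only):
 *
 *	100000000-17fffffff : System RAM
 */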
static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = ms->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = ms->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN.  To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other nodes.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
		bool want_memblock)
{
	int ret;
	int i;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn);
	if (ret < 0)
		return ret;

	/*
	 * Make all the pages reserved so that nobody will stumble over half
	 * initialized state.
	 * FIXME: We also have to associate it with a node because page_to_nid
	 * relies on having page with the proper node.
	 */
	for (i = 0; i < PAGES_PER_SECTION; i++) {
		unsigned long pfn = phys_start_pfn + i;
		struct page *page;
		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		set_page_node(page, nid);
		SetPageReserved(page);
	}

	if (!want_memblock)
		return 0;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}
/*
 * Reasonably generic function for adding memory.  It is expected that archs
 * that support memory hotplug will call this function after deciding the
 * zone to which to add the new pages.
 */
int __ref __add_pages(int nid, unsigned long phys_start_pfn,
			unsigned long nr_pages, bool want_memblock)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	struct vmem_altmap *altmap;

	/* when initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != phys_start_pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			err = -EINVAL;
			goto out;
		}
		altmap->alloc = 0;
	}

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, section_nr_to_pfn(i), want_memblock);

		/*
		 * -EEXIST is finally dealt with by the iomem resource
		 * collision check; see add_memory() =>
		 * register_memory_resource().  A warning will be printed if
		 * there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
		cond_resched();
	}
	vmemmap_populate_print_last();
out:
	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
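/*
 * Arch-side sketch (simplified, assumed shape): a typical arch_add_memory()
 * first establishes the kernel mapping for the range and then hands the
 * pfns to __add_pages().  map_the_range() is a hypothetical stand-in for
 * the arch's mapping setup:
 *
 *	int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
 *	{
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		map_the_range(start, start + size);
 *		return __add_pages(nid, start_pfn, nr_pages, want_memblock);
 *	}
 */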
#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}
static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * and shrink the zone to start there.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, find the second biggest valid mem_section
		 * and shrink the zone to end there.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the zone, removing it only creates a hole in the zone, so the
	 * zone's span need not change.  But the zone may now consist of
	 * nothing but holes, so check whether any valid section is left.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* skip the section being removed */
		if (start_pfn == pfn)
			continue;

		/* We found a valid section, so there is nothing to do. */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}
static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we
		 * need to shrink pgdat->node_start_pfn and
		 * pgdat->node_spanned_pages.  In this case, find the second
		 * smallest valid mem_section and shrink the pgdat to start
		 * there.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we need
		 * to shrink pgdat->node_spanned_pages.
		 * In this case, find the second biggest valid mem_section
		 * and shrink the pgdat to end there.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the pgdat, removing it only creates a hole in the pgdat, so the
	 * pgdat's span need not change.  But the pgdat may now consist of
	 * nothing but holes, so check whether any valid section is left.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* skip the section being removed */
		if (start_pfn == pfn)
			continue;

		/* We found a valid section, so there is nothing to do. */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}
static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	unsigned long flags;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static int __remove_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
	__remove_zone(zone, start_pfn);

	sparse_remove_one_section(zone, ms, map_offset);
	return 0;
}
/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing.  Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i;
	unsigned long map_offset = 0;
	int sections_to_remove, ret = 0;

	/* In the ZONE_DEVICE case device driver owns the memory region */
	if (is_dev_zone(zone)) {
		struct page *page = pfn_to_page(phys_start_pfn);
		struct vmem_altmap *altmap;

		altmap = to_vmem_altmap((unsigned long) page);
		if (altmap)
			map_offset = vmem_altmap_offset(altmap);
	} else {
		resource_size_t start, size;

		start = phys_start_pfn << PAGE_SHIFT;
		size = nr_pages * PAGE_SIZE;

		ret = release_mem_region_adjustable(&iomem_resource, start,
					size);
		if (ret) {
			resource_size_t endres = start + size - 1;

			pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
					&start, &endres, ret);
		}
	}

	clear_zone_contiguous(zone);

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;

		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
		map_offset = 0;
		if (ret)
			break;
	}

	set_zone_contiguous(zone);

	return ret;
}
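/*
 * Removal sketch (assumed caller, mirroring the add path above): an arch's
 * remove path typically resolves the zone from the first pfn and then
 * calls __remove_pages():
 *
 *	zone = page_zone(pfn_to_page(start_pfn));
 *	ret = __remove_pages(zone, start_pfn, nr_pages);
 */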
#endif /* CONFIG_MEMORY_HOTREMOVE */

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}

	online_mem_sections(start_pfn, start_pfn + nr_pages);

	*(unsigned long *)arg = onlined_pages;
	return 0;
}
/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * If the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before onlining, we
	 * will need to set the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is online.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * If the node doesn't have memory before onlining, we will need to
	 * set the node to node_states[N_MEMORY] after the memory is online.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}
static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}
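/*
 * Worked example (illustrative numbers only): a zone spanning pfns
 * [0x40000, 0x48000) that absorbs a hot-added range starting below it at
 * start_pfn = 0x38000 with nr_pages = 0x8000 ends up with
 * zone_start_pfn = 0x38000 and
 * spanned_pages = max(0x40000, 0x48000) - 0x38000 = 0x10000.
 */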
static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

void __ref move_pfn_range_to_zone(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;
	unsigned long flags;

	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);

	clear_zone_contiguous(zone);

	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
	pgdat_resize_lock(pgdat, &flags);
	zone_span_writelock(zone);
	resize_zone_range(zone, start_pfn, nr_pages);
	zone_span_writeunlock(zone);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);
	pgdat_resize_unlock(pgdat, &flags);

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_flags_mask
	 * expects the zone spans the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them so we should be safe
	 */
	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);

	set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to the ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
			nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in a simple case where zones do not
	 * overlap in the given range
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone or two zones overlap in the
	 * given range then we use movable zone only if movable_node is
	 * enabled because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}
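/*
 * The online_type above is driven from userspace via the per-block sysfs
 * state file, e.g. (illustrative block number):
 *
 *	# echo online_kernel > /sys/devices/system/memory/memory32/state
 *	# echo online_movable > /sys/devices/system/memory/memory32/state
 *	# echo online > /sys/devices/system/memory/memory32/state
 *
 * which map to MMOP_ONLINE_KERNEL, MMOP_ONLINE_MOVABLE and
 * MMOP_ONLINE_KEEP respectively.
 */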
/*
 * Associates the given pfn range with the given node and the zone appropriate
 * for the given online type.
 */
static struct zone * __meminit move_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	move_pfn_range_to_zone(zone, start_pfn, nr_pages);
	return zone;
}

/* Must be protected by mem_hotplug_begin() or a device_lock */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	nid = pfn_to_nid(pfn);
	/* associate pfn range with the zone */
	zone = move_pfn_range(online_type, nid, pfn, nr_pages);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		goto failed_addition;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	if (onlined_pages) {
		node_states_set_node(nid, &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL);
		else
			zone_pcp_update(zone);
	}

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(nid);
		kcompactd_run(nid);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	} else {
		/*
		 * Reset the nr_zones, order and classzone_idx before reuse.
		 * Note that kswapd will init kswapd_classzone_idx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_classzone_idx = 0;
	}

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * The node we allocated has no zone fallback lists.  To avoid
	 * accessing a not-yet-initialized zonelist, build one here.
	 */
	build_all_zonelists(pgdat);

	/*
	 * zone->managed_pages is set to an approximate value in
	 * free_area_init_core(), which will cause
	 * /sys/device/system/node/nodeX/meminfo to report wrong data.
	 * So reset it to 0 before any memory is onlined.
	 */
	reset_node_managed_pages(pgdat);

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_present_pages(pgdat);

	return pgdat;
}
static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
	return;
}

/**
 * try_online_node - online a node if offlined
 *
 * called by cpu_up() to online a node without onlined memory.
 */
int try_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	if (node_online(nid))
		return 0;

	mem_hotplug_begin();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);
out:
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	u64 start_pfn = PFN_DOWN(start);
	u64 nr_pages = size >> PAGE_SHIFT;

	/* Memory range must be aligned with section */
	if ((start_pfn & ~PAGE_SECTION_MASK) ||
	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
				(unsigned long long)start,
				(unsigned long long)size);
		return -EINVAL;
	}

	return 0;
}
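/*
 * For example, with the common x86_64 configuration of 128 MiB sections
 * (PAGES_PER_SECTION == 32768 with 4 KiB pages), both start and size must
 * be multiples of 128 MiB: start = 0x100000000, size = 0x8000000 passes,
 * while size = 0x4000000 (64 MiB) is rejected with -EINVAL.
 */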
static int online_memory_block(struct memory_block *mem, void *arg)
{
	return device_online(&mem->dev);
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory_resource(int nid, struct resource *res, bool online)
{
	u64 start, size;
	pg_data_t *pgdat = NULL;
	bool new_pgdat;
	bool new_node;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	{	/* Stupid hack to suppress address-never-null warning */
		void *p = NODE_DATA(nid);
		new_pgdat = !p;
	}

	mem_hotplug_begin();

	/*
	 * Add new range to memblock so that when hotadd_new_pgdat() is called
	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
	 * this new range and calculate total pages correctly.  The range will
	 * be removed at hot-remove time.
	 */
	memblock_add_node(start, size, nid);

	new_node = !node_online(nid);
	if (new_node) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size, true);
	if (ret < 0)
		goto error;

	/* we online node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_node) {
		unsigned long start_pfn = start >> PAGE_SHIFT;
		unsigned long nr_pages = size >> PAGE_SHIFT;

		ret = __register_one_node(nid);
		if (ret)
			goto register_fail;

		/*
		 * link memory sections under this node. This is already
		 * done when creating memory sections in register_new_memory,
		 * but that depends on the node being registered, so offline
		 * nodes have to go through register_node.
		 * TODO clean up this mess.
		 */
		ret = link_mem_sections(nid, start_pfn, nr_pages);
register_fail:
		/*
		 * If the sysfs file for the new node can't be created, CPUs
		 * on the node can't be hot-added.  There is no way to roll
		 * back at this point, so catch it with BUG_ON(), reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	/* online pages if requested */
	if (online)
		walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
				  NULL, online_memory_block);

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat && pgdat)
		rollback_node_hotadd(nid, pgdat);
	memblock_remove(start, size);

out:
	mem_hotplug_done();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory_resource);
int __ref add_memory(int nid, u64 start, u64 size)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = add_memory_resource(nid, res, memhp_auto_online);
	if (ret < 0)
		release_memory_resource(res);
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
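/*
 * Caller sketch (assumed shape, modelled loosely on the ACPI memory-hotplug
 * driver): a platform driver that discovers a new DIMM translates its
 * physical range into an add_memory() call; "info" is a hypothetical
 * descriptor of the discovered range:
 *
 *	nid = memory_add_physaddr_to_nid(info->start_addr);
 *	result = add_memory(nid, info->start_addr, info->length);
 */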
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return false;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return true;
}
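/*
 * This is what backs the per-block "removable" sysfs attribute, e.g.
 * (illustrative block number):
 *
 *	# cat /sys/devices/system/memory/memory32/removable
 *	1
 *
 * A "1" only means removal is likely to succeed; offlining can still fail.
 */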
/*
 * Confirm all pages in a range [start, end) belong to the same zone.
 * When true, return its valid [start, end).
 */
int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned long *valid_start, unsigned long *valid_end)
{
	unsigned long pfn, sec_end_pfn;
	unsigned long start, end;
	struct zone *zone = NULL;
	struct page *page;
	int i;

	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
	     pfn < end_pfn;
	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
		/* Make sure the memory section is present first */
		if (!present_section_nr(pfn_to_section_nr(pfn)))
			continue;
		for (; pfn < sec_end_pfn && pfn < end_pfn;
		     pfn += MAX_ORDER_NR_PAGES) {
			i = 0;
			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
			while ((i < MAX_ORDER_NR_PAGES) &&
				!pfn_valid_within(pfn + i))
				i++;
			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
				continue;
			page = pfn_to_page(pfn + i);
			if (zone && page_zone(page) != zone)
				return 0;
			if (!zone)
				start = pfn + i;
			zone = page_zone(page);
			end = pfn + MAX_ORDER_NR_PAGES;
		}
	}

	if (zone) {
		*valid_start = start;
		*valid_end = min(end, end_pfn);
		return 1;
	} else {
		return 0;
	}
}
/*
 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
 * non-LRU movable pages and hugepages).  We scan the pfn range because it's
 * much easier than walking linked lists.  This function returns the pfn of
 * the first found movable page if one is found, otherwise 0.
 */
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;

	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
			if (__PageMovable(page))
				return pfn;
			if (PageHuge(page)) {
				if (page_huge_active(page))
					return pfn;
				else
					pfn = round_up(pfn + 1,
						1 << compound_order(page)) - 1;
			}
		}
	}
	return 0;
}
static struct page *new_node_page(struct page *page, unsigned long private,
		int **result)
{
	int nid = page_to_nid(page);
	nodemask_t nmask = node_states[N_MEMORY];

	/*
	 * try to allocate from a different node but reuse this node if there
	 * are no other online nodes to be used (e.g. we are offlining a part
	 * of the only existing node)
	 */
	node_clear(nid, nmask);
	if (nodes_empty(nmask))
		node_set(nid, nmask);

	return new_page_nodemask(page, nid, &nmask);
}
#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		if (PageHuge(page)) {
			struct page *head = compound_head(page);
			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
			if (compound_order(head) > PFN_SECTION_SHIFT) {
				ret = -EBUSY;
				break;
			}
			if (isolate_huge_page(page, &source))
				move_pages -= 1 << compound_order(head);
			continue;
		} else if (thp_migration_supported() && PageTransHuge(page))
			pfn = page_to_pfn(compound_head(page))
				+ hpage_nr_pages(page) - 1;

		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can deal with pages on
		 * LRU and non-lru movable pages.
		 */
		if (PageLRU(page))
			ret = isolate_lru_page(page);
		else
			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			if (!__PageMovable(page))
				inc_node_page_state(page, NR_ISOLATED_ANON +
						    page_is_file_cache(page));
		} else {
#ifdef CONFIG_DEBUG_VM
			pr_alert("failed to isolate pfn %lx\n", pfn);
			dump_page(page, "isolation failed");
#endif
			put_page(page);
			/*
			 * Because we don't have a big zone->lock, we must
			 * check this again here.
			 */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_movable_pages(&source);
			goto out;
		}

		/* Allocate a new page from the nearest neighbor node */
		ret = migrate_pages(&source, new_node_page, NULL, 0,
					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret)
			putback_movable_pages(&source);
	}
out:
	return ret;
}
/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;

	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}
static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	movable_node_enabled = true;
#else
	pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
#endif
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);
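/*
 * Example boot configuration (illustrative command line): appending
 * "movable_node" to the kernel command line, e.g.
 *
 *	linux ... root=/dev/sda1 movable_node
 *
 * sets movable_node_enabled, so default_zone_for_pfn() above prefers
 * ZONE_MOVABLE for ranges that no kernel zone already covers, keeping
 * whole hot-added nodes removable.
 */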
/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt, zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offlined is in a zone of 0...zone_last,
	 * and it is the last present memory there, 0...zone_last will
	 * become empty after the offlining, so we can determine that we
	 * will need to clear the node from node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
	 */
	zone_last = ZONE_MOVABLE;

	/*
	 * Check whether node_states[N_HIGH_MEMORY] will be changed.
	 * If we try to offline the last present @nr_pages from the node,
	 * we can determine that we will need to clear the node from
	 * node_states[N_HIGH_MEMORY].
	 */
	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
	else
		arg->status_change_nid = -1;
}
static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if ((N_MEMORY != N_NORMAL_MEMORY) &&
	    (arg->status_change_nid_high >= 0))
		node_clear_state(node, N_HIGH_MEMORY);

	if ((N_MEMORY != N_HIGH_MEMORY) &&
	    (arg->status_change_nid >= 0))
		node_clear_state(node, N_MEMORY);
}
static int __ref __offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn)
{
	unsigned long pfn, nr_pages;
	long offlined_pages;
	int ret, node;
	unsigned long flags;
	unsigned long valid_start, valid_end;
	struct zone *zone;
	struct memory_notify arg;

	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * This makes hotplug much easier - and readable.
	 * We assume this for now.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
		return -EINVAL;

	zone = page_zone(pfn_to_page(valid_start));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		return ret;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
repeat:
	/* start memory hot removal */
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;

	cond_resched();
	lru_add_drain_all();
	drain_all_pages(zone);

	pfn = scan_movable_pages(start_pfn, end_pfn);
	if (pfn) { /* We have movable pages */
		ret = do_migrate_range(pfn, end_pfn);
		goto repeat;
	}

	/*
	 * Dissolve free hugepages in the memory block before doing the
	 * actual offlining, in order to keep hugetlbfs's object counting
	 * consistent.
	 */
	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
	if (ret)
		goto failed_removal;
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0)
		goto repeat;
	pr_info("Offlined Pages %ld\n", offlined_pages);
	/*
	 * All of our target pages are now isolated; we cannot roll back
	 * past this point.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and makes migrate type to be MOVABLE */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	/* removal success */
	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
	zone->present_pages -= offlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		build_all_zonelists(NULL);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
	if (arg.status_change_nid >= 0) {
		kswapd_stop(node);
		kcompactd_stop(node);
	}

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	return 0;

failed_removal:
	pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) start_pfn << PAGE_SHIFT,
		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* pushback to free area */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}
  1437. /* Must be protected by mem_hotplug_begin() or a device_lock */
  1438. int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
  1439. {
  1440. return __offline_pages(start_pfn, start_pfn + nr_pages);
  1441. }
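
/*
 * Usage sketch (hypothetical caller; the real entry points are the memory
 * sysfs "state" attribute and ACPI eject paths): both start_pfn and
 * nr_pages must be pageblock aligned, and the call must be serialized as
 * noted above. start_addr is an assumed, suitably aligned physical
 * address of one memory block:
 *
 *	mem_hotplug_begin();
 *	ret = offline_pages(PFN_DOWN(start_addr),
 *			    PHYS_PFN(memory_block_size_bytes()));
 *	mem_hotplug_done();
 */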
#endif /* CONFIG_MEMORY_HOTREMOVE */

/**
 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
 * @end_pfn: end pfn of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in the range
 * [start_pfn, end_pfn) and calls @func on each mem section.
 *
 * Returns the return value of func.
 */
int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *))
{
	struct memory_block *mem = NULL;
	struct mem_section *section;
	unsigned long pfn, section_nr;
	int ret;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		section_nr = pfn_to_section_nr(pfn);
		if (!present_section_nr(section_nr))
			continue;

		section = __nr_to_section(section_nr);
		/* Same memory block as last time? */
		if (mem)
			if ((section_nr >= mem->start_section_nr) &&
			    (section_nr <= mem->end_section_nr))
				continue;

		mem = find_memory_block_hinted(section, mem);
		if (!mem)
			continue;

		ret = func(mem, arg);
		if (ret) {
			kobject_put(&mem->dev.kobj);
			return ret;
		}
	}

	if (mem)
		kobject_put(&mem->dev.kobj);

	return 0;
}
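
/*
 * Example (hypothetical callback): count the memory blocks backing a
 * range. A non-zero return from the callback stops the walk early, which
 * check_memblock_offlined_cb() below relies on:
 *
 *	static int demo_count_blocks(struct memory_block *mem, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int nr_blocks = 0;
 *
 *	walk_memory_range(start_pfn, end_pfn, &nr_blocks,
 *			  demo_count_blocks);
 */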

#ifdef CONFIG_MEMORY_HOTREMOVE
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret)) {
		phys_addr_t beginpa, endpa;

		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1)) - 1;
		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
			&beginpa, &endpa);
	}

	return ret;
}

static int check_cpu_on_node(pg_data_t *pgdat)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * A CPU on this node has not been removed yet,
			 * so we cannot offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}

static void unmap_cpu_on_node(pg_data_t *pgdat)
{
#ifdef CONFIG_ACPI_NUMA
	int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_to_node(cpu) == pgdat->node_id)
			numa_clear_node(cpu);
#endif
}

static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
{
	int ret;

	ret = check_cpu_on_node(pgdat);
	if (ret)
		return ret;

	/*
	 * The node is about to be offlined when we get here, so we can
	 * clear the cpu_to_node() mappings now.
	 */
	unmap_cpu_on_node(pgdat);
	return 0;
}

/**
 * try_offline_node
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!present_section_nr(section_nr))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/*
		 * Some memory sections of this node have not been removed,
		 * so we cannot offline the node now.
		 */
		return;
	}

	if (check_and_unmap_cpu_on_node(pgdat))
		return;

	/*
	 * All memory and CPUs of this node have been removed; we can
	 * offline the node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);
}
EXPORT_SYMBOL(try_offline_node);
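
/*
 * Usage sketch (hypothetical): remove_memory() below already calls this
 * for its own range, but a driver tearing a node down piecemeal could
 * retry the node offline after each removal, holding the hotplug lock:
 *
 *	lock_device_hotplug();
 *	try_offline_node(nid);
 *	unlock_device_hotplug();
 *
 * The call is a silent no-op while any present section or present CPU
 * still belongs to @nid.
 */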

/**
 * remove_memory
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __ref remove_memory(int nid, u64 start, u64 size)
{
	int ret;

	BUG_ON(check_hotplug_memory_range(start, size));

	mem_hotplug_begin();

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and trigger a
	 * BUG() if this is not the case.
	 */
	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
				check_memblock_offlined_cb);
	if (ret)
		BUG();

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");
	memblock_free(start, size);
	memblock_remove(start, size);

	arch_remove_memory(start, size);

	try_offline_node(nid);

	mem_hotplug_done();
}
EXPORT_SYMBOL_GPL(remove_memory);
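
/*
 * Usage sketch (hypothetical, modelled loosely on the ACPI memory-device
 * eject path): every memory block in the range must be offlined first,
 * otherwise the walk above hits BUG(). start/size are assumed to satisfy
 * check_hotplug_memory_range():
 *
 *	lock_device_hotplug();
 *	ret = offline_pages(PFN_DOWN(start), PFN_DOWN(size));
 *	if (!ret)
 *		remove_memory(nid, start, size);
 *	unlock_device_hotplug();
 */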
#endif /* CONFIG_MEMORY_HOTREMOVE */