  1. /*
  2. * linux/mm/memory_hotplug.c
  3. *
  4. * Copyright (C)
  5. */
  6. #include <linux/stddef.h>
  7. #include <linux/mm.h>
  8. #include <linux/swap.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/pagemap.h>
  11. #include <linux/compiler.h>
  12. #include <linux/export.h>
  13. #include <linux/pagevec.h>
  14. #include <linux/writeback.h>
  15. #include <linux/slab.h>
  16. #include <linux/sysctl.h>
  17. #include <linux/cpu.h>
  18. #include <linux/memory.h>
  19. #include <linux/memremap.h>
  20. #include <linux/memory_hotplug.h>
  21. #include <linux/highmem.h>
  22. #include <linux/vmalloc.h>
  23. #include <linux/ioport.h>
  24. #include <linux/delay.h>
  25. #include <linux/migrate.h>
  26. #include <linux/page-isolation.h>
  27. #include <linux/pfn.h>
  28. #include <linux/suspend.h>
  29. #include <linux/mm_inline.h>
  30. #include <linux/firmware-map.h>
  31. #include <linux/stop_machine.h>
  32. #include <linux/hugetlb.h>
  33. #include <linux/memblock.h>
  34. #include <linux/bootmem.h>
  35. #include <asm/tlbflush.h>
  36. #include "internal.h"
  37. /*
  38. * online_page_callback contains a pointer to the current page-onlining function.
  39. * Initially it is generic_online_page(). If required, it can be changed by
  40. * calling set_online_page_callback() to register a callback and
  41. * restore_online_page_callback() to restore the generic callback.
  42. */
  43. static void generic_online_page(struct page *page);
  44. static online_page_callback_t online_page_callback = generic_online_page;
  45. static DEFINE_MUTEX(online_page_callback_lock);
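/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a driver such as a memory balloon can take over page onlining with the
 * registration API above. my_online_page() and my_driver_claim_page() are
 * hypothetical names.
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		if (my_driver_claim_page(page))
 *			return;			(page is kept by the driver)
 *		__online_page_increment_counters(page);
 *		__online_page_free(page);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */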
  46. /* The same as the cpu_hotplug lock, but for memory hotplug. */
  47. static struct {
  48. struct task_struct *active_writer;
  49. struct mutex lock; /* Synchronizes accesses to refcount, */
  50. /*
  51. * Also blocks the new readers during
  52. * an ongoing mem hotplug operation.
  53. */
  54. int refcount;
  55. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  56. struct lockdep_map dep_map;
  57. #endif
  58. } mem_hotplug = {
  59. .active_writer = NULL,
  60. .lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
  61. .refcount = 0,
  62. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  63. .dep_map = {.name = "mem_hotplug.lock" },
  64. #endif
  65. };
  66. /* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
  67. #define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
  68. #define memhp_lock_acquire() lock_map_acquire(&mem_hotplug.dep_map)
  69. #define memhp_lock_release() lock_map_release(&mem_hotplug.dep_map)
  70. bool memhp_auto_online;
  71. EXPORT_SYMBOL_GPL(memhp_auto_online);
  72. void get_online_mems(void)
  73. {
  74. might_sleep();
  75. if (mem_hotplug.active_writer == current)
  76. return;
  77. memhp_lock_acquire_read();
  78. mutex_lock(&mem_hotplug.lock);
  79. mem_hotplug.refcount++;
  80. mutex_unlock(&mem_hotplug.lock);
  81. }
  82. void put_online_mems(void)
  83. {
  84. if (mem_hotplug.active_writer == current)
  85. return;
  86. mutex_lock(&mem_hotplug.lock);
  87. if (WARN_ON(!mem_hotplug.refcount))
  88. mem_hotplug.refcount++; /* try to fix things up */
  89. if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
  90. wake_up_process(mem_hotplug.active_writer);
  91. mutex_unlock(&mem_hotplug.lock);
  92. memhp_lock_release();
  93. }
  94. void mem_hotplug_begin(void)
  95. {
  96. mem_hotplug.active_writer = current;
  97. memhp_lock_acquire();
  98. for (;;) {
  99. mutex_lock(&mem_hotplug.lock);
  100. if (likely(!mem_hotplug.refcount))
  101. break;
  102. __set_current_state(TASK_UNINTERRUPTIBLE);
  103. mutex_unlock(&mem_hotplug.lock);
  104. schedule();
  105. }
  106. }
  107. void mem_hotplug_done(void)
  108. {
  109. mem_hotplug.active_writer = NULL;
  110. mutex_unlock(&mem_hotplug.lock);
  111. memhp_lock_release();
  112. }
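/*
 * Usage sketch (editorial addition): code that must see a stable set of
 * online memory wraps its work in the reader pair, while hotplug operations
 * nest inside the writer pair. my_walk_memory() is a hypothetical helper.
 *
 *	get_online_mems();
 *	my_walk_memory();	(no memory can be hot-[un]plugged here)
 *	put_online_mems();
 *
 *	mem_hotplug_begin();
 *	(... actual hotplug work; all readers have drained ...)
 *	mem_hotplug_done();
 */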
  113. /* add this memory to iomem resource */
  114. static struct resource *register_memory_resource(u64 start, u64 size)
  115. {
  116. struct resource *res;
  117. res = kzalloc(sizeof(struct resource), GFP_KERNEL);
  118. if (!res)
  119. return ERR_PTR(-ENOMEM);
  120. res->name = "System RAM";
  121. res->start = start;
  122. res->end = start + size - 1;
  123. res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
  124. if (request_resource(&iomem_resource, res) < 0) {
  125. pr_debug("System RAM resource %pR cannot be added\n", res);
  126. kfree(res);
  127. return ERR_PTR(-EEXIST);
  128. }
  129. return res;
  130. }
  131. static void release_memory_resource(struct resource *res)
  132. {
  133. if (!res)
  134. return;
  135. release_resource(res);
  136. kfree(res);
  137. return;
  138. }
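/*
 * Editorial note: after a successful register_memory_resource() the range is
 * visible in /proc/iomem as a busy "System RAM" entry, e.g. (addresses are
 * only an example):
 *
 *	100000000-17fffffff : System RAM
 */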
  139. #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
  140. void get_page_bootmem(unsigned long info, struct page *page,
  141. unsigned long type)
  142. {
  143. page->lru.next = (struct list_head *) type;
  144. SetPagePrivate(page);
  145. set_page_private(page, info);
  146. atomic_inc(&page->_count);
  147. }
  148. void put_page_bootmem(struct page *page)
  149. {
  150. unsigned long type;
  151. type = (unsigned long) page->lru.next;
  152. BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
  153. type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
  154. if (atomic_dec_return(&page->_count) == 1) {
  155. ClearPagePrivate(page);
  156. set_page_private(page, 0);
  157. INIT_LIST_HEAD(&page->lru);
  158. free_reserved_page(page);
  159. }
  160. }
  161. #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
  162. #ifndef CONFIG_SPARSEMEM_VMEMMAP
  163. static void register_page_bootmem_info_section(unsigned long start_pfn)
  164. {
  165. unsigned long *usemap, mapsize, section_nr, i;
  166. struct mem_section *ms;
  167. struct page *page, *memmap;
  168. section_nr = pfn_to_section_nr(start_pfn);
  169. ms = __nr_to_section(section_nr);
  170. /* Get section's memmap address */
  171. memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
  172. /*
  173. * Get page for the memmap's phys address
  174. * XXX: need more consideration for sparse_vmemmap...
  175. */
  176. page = virt_to_page(memmap);
  177. mapsize = sizeof(struct page) * PAGES_PER_SECTION;
  178. mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
  179. /* remember memmap's page */
  180. for (i = 0; i < mapsize; i++, page++)
  181. get_page_bootmem(section_nr, page, SECTION_INFO);
  182. usemap = __nr_to_section(section_nr)->pageblock_flags;
  183. page = virt_to_page(usemap);
  184. mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
  185. for (i = 0; i < mapsize; i++, page++)
  186. get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
  187. }
  188. #else /* CONFIG_SPARSEMEM_VMEMMAP */
  189. static void register_page_bootmem_info_section(unsigned long start_pfn)
  190. {
  191. unsigned long *usemap, mapsize, section_nr, i;
  192. struct mem_section *ms;
  193. struct page *page, *memmap;
  194. if (!pfn_valid(start_pfn))
  195. return;
  196. section_nr = pfn_to_section_nr(start_pfn);
  197. ms = __nr_to_section(section_nr);
  198. memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
  199. register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
  200. usemap = __nr_to_section(section_nr)->pageblock_flags;
  201. page = virt_to_page(usemap);
  202. mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
  203. for (i = 0; i < mapsize; i++, page++)
  204. get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
  205. }
  206. #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
  207. void register_page_bootmem_info_node(struct pglist_data *pgdat)
  208. {
  209. unsigned long i, pfn, end_pfn, nr_pages;
  210. int node = pgdat->node_id;
  211. struct page *page;
  212. struct zone *zone;
  213. nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
  214. page = virt_to_page(pgdat);
  215. for (i = 0; i < nr_pages; i++, page++)
  216. get_page_bootmem(node, page, NODE_INFO);
  217. zone = &pgdat->node_zones[0];
  218. for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
  219. if (zone_is_initialized(zone)) {
  220. nr_pages = zone->wait_table_hash_nr_entries
  221. * sizeof(wait_queue_head_t);
  222. nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
  223. page = virt_to_page(zone->wait_table);
  224. for (i = 0; i < nr_pages; i++, page++)
  225. get_page_bootmem(node, page, NODE_INFO);
  226. }
  227. }
  228. pfn = pgdat->node_start_pfn;
  229. end_pfn = pgdat_end_pfn(pgdat);
  230. /* register section info */
  231. for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
  232. /*
  233. * Some platforms can assign the same pfn to multiple nodes - on
  234. * node0 as well as nodeN. To avoid registering a pfn against
  235. * multiple nodes we check that this pfn does not already
  236. * reside in some other nodes.
  237. */
  238. if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
  239. register_page_bootmem_info_section(pfn);
  240. }
  241. }
  242. #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
  243. static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
  244. unsigned long end_pfn)
  245. {
  246. unsigned long old_zone_end_pfn;
  247. zone_span_writelock(zone);
  248. old_zone_end_pfn = zone_end_pfn(zone);
  249. if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
  250. zone->zone_start_pfn = start_pfn;
  251. zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
  252. zone->zone_start_pfn;
  253. zone_span_writeunlock(zone);
  254. }
  255. static void resize_zone(struct zone *zone, unsigned long start_pfn,
  256. unsigned long end_pfn)
  257. {
  258. zone_span_writelock(zone);
  259. if (end_pfn - start_pfn) {
  260. zone->zone_start_pfn = start_pfn;
  261. zone->spanned_pages = end_pfn - start_pfn;
  262. } else {
  263. /*
  264. * make it consistent with free_area_init_core():
  265. * if spanned_pages == 0, then keep zone_start_pfn == 0
  266. */
  267. zone->zone_start_pfn = 0;
  268. zone->spanned_pages = 0;
  269. }
  270. zone_span_writeunlock(zone);
  271. }
  272. static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
  273. unsigned long end_pfn)
  274. {
  275. enum zone_type zid = zone_idx(zone);
  276. int nid = zone->zone_pgdat->node_id;
  277. unsigned long pfn;
  278. for (pfn = start_pfn; pfn < end_pfn; pfn++)
  279. set_page_links(pfn_to_page(pfn), zid, nid, pfn);
  280. }
  281. /* Can fail with -ENOMEM from allocating a wait table with vmalloc() or
  282. * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */
  283. static int __ref ensure_zone_is_initialized(struct zone *zone,
  284. unsigned long start_pfn, unsigned long num_pages)
  285. {
  286. if (!zone_is_initialized(zone))
  287. return init_currently_empty_zone(zone, start_pfn, num_pages);
  288. return 0;
  289. }
  290. static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
  291. unsigned long start_pfn, unsigned long end_pfn)
  292. {
  293. int ret;
  294. unsigned long flags;
  295. unsigned long z1_start_pfn;
  296. ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
  297. if (ret)
  298. return ret;
  299. pgdat_resize_lock(z1->zone_pgdat, &flags);
  300. /* can't move pfns which are higher than @z2 */
  301. if (end_pfn > zone_end_pfn(z2))
  302. goto out_fail;
  304. /* the part being moved out must be at the leftmost of @z2 */
  304. if (start_pfn > z2->zone_start_pfn)
  305. goto out_fail;
  307. /* the range must include/overlap @z2 */
  307. if (end_pfn <= z2->zone_start_pfn)
  308. goto out_fail;
  309. /* use start_pfn for z1's start_pfn if z1 is empty */
  310. if (!zone_is_empty(z1))
  311. z1_start_pfn = z1->zone_start_pfn;
  312. else
  313. z1_start_pfn = start_pfn;
  314. resize_zone(z1, z1_start_pfn, end_pfn);
  315. resize_zone(z2, end_pfn, zone_end_pfn(z2));
  316. pgdat_resize_unlock(z1->zone_pgdat, &flags);
  317. fix_zone_id(z1, start_pfn, end_pfn);
  318. return 0;
  319. out_fail:
  320. pgdat_resize_unlock(z1->zone_pgdat, &flags);
  321. return -1;
  322. }
  323. static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
  324. unsigned long start_pfn, unsigned long end_pfn)
  325. {
  326. int ret;
  327. unsigned long flags;
  328. unsigned long z2_end_pfn;
  329. ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
  330. if (ret)
  331. return ret;
  332. pgdat_resize_lock(z1->zone_pgdat, &flags);
  333. /* can't move pfns which are lower than @z1 */
  334. if (z1->zone_start_pfn > start_pfn)
  335. goto out_fail;
  336. /* the part being moved out must be at the rightmost of @z1 */
  337. if (zone_end_pfn(z1) > end_pfn)
  338. goto out_fail;
  339. /* the range must include/overlap @z1 */
  340. if (start_pfn >= zone_end_pfn(z1))
  341. goto out_fail;
  342. /* use end_pfn for z2's end_pfn if z2 is empty */
  343. if (!zone_is_empty(z2))
  344. z2_end_pfn = zone_end_pfn(z2);
  345. else
  346. z2_end_pfn = end_pfn;
  347. resize_zone(z1, z1->zone_start_pfn, start_pfn);
  348. resize_zone(z2, start_pfn, z2_end_pfn);
  349. pgdat_resize_unlock(z1->zone_pgdat, &flags);
  350. fix_zone_id(z2, start_pfn, end_pfn);
  351. return 0;
  352. out_fail:
  353. pgdat_resize_unlock(z1->zone_pgdat, &flags);
  354. return -1;
  355. }
  356. static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
  357. unsigned long end_pfn)
  358. {
  359. unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);
  360. if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
  361. pgdat->node_start_pfn = start_pfn;
  362. pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
  363. pgdat->node_start_pfn;
  364. }
  365. static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
  366. {
  367. struct pglist_data *pgdat = zone->zone_pgdat;
  368. int nr_pages = PAGES_PER_SECTION;
  369. int nid = pgdat->node_id;
  370. int zone_type;
  371. unsigned long flags, pfn;
  372. int ret;
  373. zone_type = zone - pgdat->node_zones;
  374. ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
  375. if (ret)
  376. return ret;
  377. pgdat_resize_lock(zone->zone_pgdat, &flags);
  378. grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
  379. grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
  380. phys_start_pfn + nr_pages);
  381. pgdat_resize_unlock(zone->zone_pgdat, &flags);
  382. memmap_init_zone(nr_pages, nid, zone_type,
  383. phys_start_pfn, MEMMAP_HOTPLUG);
  384. /* online_page_range is called later and expects pages reserved */
  385. for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
  386. if (!pfn_valid(pfn))
  387. continue;
  388. SetPageReserved(pfn_to_page(pfn));
  389. }
  390. return 0;
  391. }
  392. static int __meminit __add_section(int nid, struct zone *zone,
  393. unsigned long phys_start_pfn)
  394. {
  395. int ret;
  396. if (pfn_valid(phys_start_pfn))
  397. return -EEXIST;
  398. ret = sparse_add_one_section(zone, phys_start_pfn);
  399. if (ret < 0)
  400. return ret;
  401. ret = __add_zone(zone, phys_start_pfn);
  402. if (ret < 0)
  403. return ret;
  404. return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
  405. }
  406. /*
  407. * Reasonably generic function for adding memory. It is
  408. * expected that archs that support memory hotplug will
  409. * call this function after deciding the zone to which to
  410. * add the new pages.
  411. */
  412. int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
  413. unsigned long nr_pages)
  414. {
  415. unsigned long i;
  416. int err = 0;
  417. int start_sec, end_sec;
  418. struct vmem_altmap *altmap;
  419. /* when initializing the mem_map, align the hot-added range to sections */
  420. start_sec = pfn_to_section_nr(phys_start_pfn);
  421. end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
  422. altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
  423. if (altmap) {
  424. /*
  425. * Validate altmap is within bounds of the total request
  426. */
  427. if (altmap->base_pfn != phys_start_pfn
  428. || vmem_altmap_offset(altmap) > nr_pages) {
  429. pr_warn_once("memory add fail, invalid altmap\n");
  430. return -EINVAL;
  431. }
  432. altmap->alloc = 0;
  433. }
  434. for (i = start_sec; i <= end_sec; i++) {
  435. err = __add_section(nid, zone, section_nr_to_pfn(i));
  436. /*
  437. * -EEXIST is finally dealt with by the ioresource collision
  438. * check; see add_memory() => register_memory_resource().
  439. * A warning is printed if there is a collision.
  440. */
  441. if (err && (err != -EEXIST))
  442. break;
  443. err = 0;
  444. }
  445. vmemmap_populate_print_last();
  446. return err;
  447. }
  448. EXPORT_SYMBOL_GPL(__add_pages);
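/*
 * Illustrative sketch (editorial addition): an architecture's
 * arch_add_memory() typically maps the range into the kernel page tables and
 * then hands the section-aligned pfn range to __add_pages(). This is only the
 * general shape, modelled on x86; init_memory_mapping() is x86-specific.
 *
 *	int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones +
 *			zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		init_memory_mapping(start, start + size);
 *		return __add_pages(nid, zone, start_pfn, nr_pages);
 *	}
 */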
  449. #ifdef CONFIG_MEMORY_HOTREMOVE
  450. /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
  451. static int find_smallest_section_pfn(int nid, struct zone *zone,
  452. unsigned long start_pfn,
  453. unsigned long end_pfn)
  454. {
  455. struct mem_section *ms;
  456. for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
  457. ms = __pfn_to_section(start_pfn);
  458. if (unlikely(!valid_section(ms)))
  459. continue;
  460. if (unlikely(pfn_to_nid(start_pfn) != nid))
  461. continue;
  462. if (zone && zone != page_zone(pfn_to_page(start_pfn)))
  463. continue;
  464. return start_pfn;
  465. }
  466. return 0;
  467. }
  468. /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
  469. static int find_biggest_section_pfn(int nid, struct zone *zone,
  470. unsigned long start_pfn,
  471. unsigned long end_pfn)
  472. {
  473. struct mem_section *ms;
  474. unsigned long pfn;
  475. /* pfn is the end pfn of a memory section. */
  476. pfn = end_pfn - 1;
  477. for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
  478. ms = __pfn_to_section(pfn);
  479. if (unlikely(!valid_section(ms)))
  480. continue;
  481. if (unlikely(pfn_to_nid(pfn) != nid))
  482. continue;
  483. if (zone && zone != page_zone(pfn_to_page(pfn)))
  484. continue;
  485. return pfn;
  486. }
  487. return 0;
  488. }
  489. static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
  490. unsigned long end_pfn)
  491. {
  492. unsigned long zone_start_pfn = zone->zone_start_pfn;
  493. unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
  494. unsigned long zone_end_pfn = z;
  495. unsigned long pfn;
  496. struct mem_section *ms;
  497. int nid = zone_to_nid(zone);
  498. zone_span_writelock(zone);
  499. if (zone_start_pfn == start_pfn) {
  500. /*
  501. * If the section is the smallest section in the zone, we need to
  502. * shrink zone->zone_start_pfn and zone->spanned_pages.
  503. * In this case, find the second smallest valid mem_section
  504. * for shrinking the zone.
  505. */
  506. pfn = find_smallest_section_pfn(nid, zone, end_pfn,
  507. zone_end_pfn);
  508. if (pfn) {
  509. zone->zone_start_pfn = pfn;
  510. zone->spanned_pages = zone_end_pfn - pfn;
  511. }
  512. } else if (zone_end_pfn == end_pfn) {
  513. /*
  514. * If the section is the biggest section in the zone, we need to
  515. * shrink zone->spanned_pages.
  516. * In this case, find the second biggest valid mem_section for
  517. * shrinking the zone.
  518. */
  519. pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
  520. start_pfn);
  521. if (pfn)
  522. zone->spanned_pages = pfn - zone_start_pfn + 1;
  523. }
  524. /*
  525. * The section is neither the biggest nor the smallest mem_section in the
  526. * zone, so it only creates a hole in the zone and we need not change
  527. * the zone. But the zone may now consist only of holes, so check
  528. * whether the zone still has any valid section.
  529. */
  530. pfn = zone_start_pfn;
  531. for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
  532. ms = __pfn_to_section(pfn);
  533. if (unlikely(!valid_section(ms)))
  534. continue;
  535. if (page_zone(pfn_to_page(pfn)) != zone)
  536. continue;
  537. /* If the section is current section, it continues the loop */
  538. if (start_pfn == pfn)
  539. continue;
  540. /* If we find valid section, we have nothing to do */
  541. zone_span_writeunlock(zone);
  542. return;
  543. }
  544. /* The zone has no valid section */
  545. zone->zone_start_pfn = 0;
  546. zone->spanned_pages = 0;
  547. zone_span_writeunlock(zone);
  548. }
  549. static void shrink_pgdat_span(struct pglist_data *pgdat,
  550. unsigned long start_pfn, unsigned long end_pfn)
  551. {
  552. unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
  553. unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
  554. unsigned long pgdat_end_pfn = p;
  555. unsigned long pfn;
  556. struct mem_section *ms;
  557. int nid = pgdat->node_id;
  558. if (pgdat_start_pfn == start_pfn) {
  559. /*
  560. * If the section is the smallest section in the pgdat, we need to
  561. * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
  562. * In this case, find the second smallest valid mem_section
  563. * for shrinking the pgdat.
  564. */
  565. pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
  566. pgdat_end_pfn);
  567. if (pfn) {
  568. pgdat->node_start_pfn = pfn;
  569. pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
  570. }
  571. } else if (pgdat_end_pfn == end_pfn) {
  572. /*
  573. * If the section is the biggest section in the pgdat, we need to
  574. * shrink pgdat->node_spanned_pages.
  575. * In this case, find the second biggest valid mem_section for
  576. * shrinking the pgdat.
  577. */
  578. pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
  579. start_pfn);
  580. if (pfn)
  581. pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
  582. }
  583. /*
  584. * If the section is neither the biggest nor the smallest mem_section in
  585. * the pgdat, it only creates a hole in the pgdat and we need not change
  586. * the pgdat.
  587. * But the pgdat may now consist only of holes, so check whether it
  588. * still has any valid section.
  589. */
  590. pfn = pgdat_start_pfn;
  591. for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
  592. ms = __pfn_to_section(pfn);
  593. if (unlikely(!valid_section(ms)))
  594. continue;
  595. if (pfn_to_nid(pfn) != nid)
  596. continue;
  597. /* If the section is current section, it continues the loop */
  598. if (start_pfn == pfn)
  599. continue;
  600. /* If we find valid section, we have nothing to do */
  601. return;
  602. }
  603. /* The pgdat has no valid section */
  604. pgdat->node_start_pfn = 0;
  605. pgdat->node_spanned_pages = 0;
  606. }
  607. static void __remove_zone(struct zone *zone, unsigned long start_pfn)
  608. {
  609. struct pglist_data *pgdat = zone->zone_pgdat;
  610. int nr_pages = PAGES_PER_SECTION;
  611. int zone_type;
  612. unsigned long flags;
  613. zone_type = zone - pgdat->node_zones;
  614. pgdat_resize_lock(zone->zone_pgdat, &flags);
  615. shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
  616. shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
  617. pgdat_resize_unlock(zone->zone_pgdat, &flags);
  618. }
  619. static int __remove_section(struct zone *zone, struct mem_section *ms,
  620. unsigned long map_offset)
  621. {
  622. unsigned long start_pfn;
  623. int scn_nr;
  624. int ret = -EINVAL;
  625. if (!valid_section(ms))
  626. return ret;
  627. ret = unregister_memory_section(ms);
  628. if (ret)
  629. return ret;
  630. scn_nr = __section_nr(ms);
  631. start_pfn = section_nr_to_pfn(scn_nr);
  632. __remove_zone(zone, start_pfn);
  633. sparse_remove_one_section(zone, ms, map_offset);
  634. return 0;
  635. }
  636. /**
  637. * __remove_pages() - remove sections of pages from a zone
  638. * @zone: zone from which pages need to be removed
  639. * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
  640. * @nr_pages: number of pages to remove (must be multiple of section size)
  641. *
  642. * Generic helper function to remove section mappings and sysfs entries
  643. * for the section of the memory we are removing. Caller needs to make
  644. * sure that pages are marked reserved and zones are adjusted properly by
  645. * calling offline_pages().
  646. */
  647. int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
  648. unsigned long nr_pages)
  649. {
  650. unsigned long i;
  651. unsigned long map_offset = 0;
  652. int sections_to_remove, ret = 0;
  653. /* In the ZONE_DEVICE case device driver owns the memory region */
  654. if (is_dev_zone(zone)) {
  655. struct page *page = pfn_to_page(phys_start_pfn);
  656. struct vmem_altmap *altmap;
  657. altmap = to_vmem_altmap((unsigned long) page);
  658. if (altmap)
  659. map_offset = vmem_altmap_offset(altmap);
  660. } else {
  661. resource_size_t start, size;
  662. start = phys_start_pfn << PAGE_SHIFT;
  663. size = nr_pages * PAGE_SIZE;
  664. ret = release_mem_region_adjustable(&iomem_resource, start,
  665. size);
  666. if (ret) {
  667. resource_size_t endres = start + size - 1;
  668. pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
  669. &start, &endres, ret);
  670. }
  671. }
  672. /*
  673. * We can only remove entire sections
  674. */
  675. BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
  676. BUG_ON(nr_pages % PAGES_PER_SECTION);
  677. sections_to_remove = nr_pages / PAGES_PER_SECTION;
  678. for (i = 0; i < sections_to_remove; i++) {
  679. unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
  680. ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
  681. map_offset = 0;
  682. if (ret)
  683. break;
  684. }
  685. return ret;
  686. }
  687. EXPORT_SYMBOL_GPL(__remove_pages);
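/*
 * Editorial note: the mirror of the hot-add path is the architecture's
 * arch_remove_memory(), which resolves the zone from the first pfn and calls
 * __remove_pages(zone, start_pfn, nr_pages) before tearing down the kernel
 * mapping of the range.
 */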
  688. #endif /* CONFIG_MEMORY_HOTREMOVE */
  689. int set_online_page_callback(online_page_callback_t callback)
  690. {
  691. int rc = -EINVAL;
  692. get_online_mems();
  693. mutex_lock(&online_page_callback_lock);
  694. if (online_page_callback == generic_online_page) {
  695. online_page_callback = callback;
  696. rc = 0;
  697. }
  698. mutex_unlock(&online_page_callback_lock);
  699. put_online_mems();
  700. return rc;
  701. }
  702. EXPORT_SYMBOL_GPL(set_online_page_callback);
  703. int restore_online_page_callback(online_page_callback_t callback)
  704. {
  705. int rc = -EINVAL;
  706. get_online_mems();
  707. mutex_lock(&online_page_callback_lock);
  708. if (online_page_callback == callback) {
  709. online_page_callback = generic_online_page;
  710. rc = 0;
  711. }
  712. mutex_unlock(&online_page_callback_lock);
  713. put_online_mems();
  714. return rc;
  715. }
  716. EXPORT_SYMBOL_GPL(restore_online_page_callback);
  717. void __online_page_set_limits(struct page *page)
  718. {
  719. }
  720. EXPORT_SYMBOL_GPL(__online_page_set_limits);
  721. void __online_page_increment_counters(struct page *page)
  722. {
  723. adjust_managed_page_count(page, 1);
  724. }
  725. EXPORT_SYMBOL_GPL(__online_page_increment_counters);
  726. void __online_page_free(struct page *page)
  727. {
  728. __free_reserved_page(page);
  729. }
  730. EXPORT_SYMBOL_GPL(__online_page_free);
  731. static void generic_online_page(struct page *page)
  732. {
  733. __online_page_set_limits(page);
  734. __online_page_increment_counters(page);
  735. __online_page_free(page);
  736. }
  737. static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
  738. void *arg)
  739. {
  740. unsigned long i;
  741. unsigned long onlined_pages = *(unsigned long *)arg;
  742. struct page *page;
  743. if (PageReserved(pfn_to_page(start_pfn)))
  744. for (i = 0; i < nr_pages; i++) {
  745. page = pfn_to_page(start_pfn + i);
  746. (*online_page_callback)(page);
  747. onlined_pages++;
  748. }
  749. *(unsigned long *)arg = onlined_pages;
  750. return 0;
  751. }
  752. #ifdef CONFIG_MOVABLE_NODE
  753. /*
  754. * When CONFIG_MOVABLE_NODE is set, we permit onlining of a node which doesn't have
  755. * normal memory.
  756. */
  757. static bool can_online_high_movable(struct zone *zone)
  758. {
  759. return true;
  760. }
  761. #else /* CONFIG_MOVABLE_NODE */
  762. /* ensure every online node has NORMAL memory */
  763. static bool can_online_high_movable(struct zone *zone)
  764. {
  765. return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
  766. }
  767. #endif /* CONFIG_MOVABLE_NODE */
  768. /* check which state of node_states will be changed when online memory */
  769. static void node_states_check_changes_online(unsigned long nr_pages,
  770. struct zone *zone, struct memory_notify *arg)
  771. {
  772. int nid = zone_to_nid(zone);
  773. enum zone_type zone_last = ZONE_NORMAL;
  774. /*
  775. * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
  776. * contains nodes which have zones of 0...ZONE_NORMAL,
  777. * set zone_last to ZONE_NORMAL.
  778. *
  779. * If we don't have HIGHMEM nor movable node,
  780. * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
  781. * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
  782. */
  783. if (N_MEMORY == N_NORMAL_MEMORY)
  784. zone_last = ZONE_MOVABLE;
  785. /*
  786. * If the memory to be onlined is in a zone of 0...zone_last, and
  787. * the zones of 0...zone_last don't have memory before online, we will
  788. * need to set the node to node_states[N_NORMAL_MEMORY] after
  789. * the memory is online.
  790. */
  791. if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
  792. arg->status_change_nid_normal = nid;
  793. else
  794. arg->status_change_nid_normal = -1;
  795. #ifdef CONFIG_HIGHMEM
  796. /*
  797. * If we have movable node, node_states[N_HIGH_MEMORY]
  798. * contains nodes which have zones of 0...ZONE_HIGHMEM,
  799. * set zone_last to ZONE_HIGHMEM.
  800. *
  801. * If we don't have movable node, node_states[N_NORMAL_MEMORY]
  802. * contains nodes which have zones of 0...ZONE_MOVABLE,
  803. * set zone_last to ZONE_MOVABLE.
  804. */
  805. zone_last = ZONE_HIGHMEM;
  806. if (N_MEMORY == N_HIGH_MEMORY)
  807. zone_last = ZONE_MOVABLE;
  808. if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
  809. arg->status_change_nid_high = nid;
  810. else
  811. arg->status_change_nid_high = -1;
  812. #else
  813. arg->status_change_nid_high = arg->status_change_nid_normal;
  814. #endif
  815. /*
  816. * If the node doesn't have memory before onlining, we will need to
  817. * set the node to node_states[N_MEMORY] after the memory
  818. * is online.
  819. */
  820. if (!node_state(nid, N_MEMORY))
  821. arg->status_change_nid = nid;
  822. else
  823. arg->status_change_nid = -1;
  824. }
  825. static void node_states_set_node(int node, struct memory_notify *arg)
  826. {
  827. if (arg->status_change_nid_normal >= 0)
  828. node_set_state(node, N_NORMAL_MEMORY);
  829. if (arg->status_change_nid_high >= 0)
  830. node_set_state(node, N_HIGH_MEMORY);
  831. node_set_state(node, N_MEMORY);
  832. }
  833. /* Must be protected by mem_hotplug_begin() */
  834. int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
  835. {
  836. unsigned long flags;
  837. unsigned long onlined_pages = 0;
  838. struct zone *zone;
  839. int need_zonelists_rebuild = 0;
  840. int nid;
  841. int ret;
  842. struct memory_notify arg;
  843. /*
  844. * This doesn't need a lock to do pfn_to_page().
  845. * The section can't be removed here because of the
  846. * memory_block->state_mutex.
  847. */
  848. zone = page_zone(pfn_to_page(pfn));
  849. if ((zone_idx(zone) > ZONE_NORMAL ||
  850. online_type == MMOP_ONLINE_MOVABLE) &&
  851. !can_online_high_movable(zone))
  852. return -EINVAL;
  853. if (online_type == MMOP_ONLINE_KERNEL &&
  854. zone_idx(zone) == ZONE_MOVABLE) {
  855. if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages))
  856. return -EINVAL;
  857. }
  858. if (online_type == MMOP_ONLINE_MOVABLE &&
  859. zone_idx(zone) == ZONE_MOVABLE - 1) {
  860. if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages))
  861. return -EINVAL;
  862. }
  863. /* The previous code may have changed the zone of the pfn range */
  864. zone = page_zone(pfn_to_page(pfn));
  865. arg.start_pfn = pfn;
  866. arg.nr_pages = nr_pages;
  867. node_states_check_changes_online(nr_pages, zone, &arg);
  868. nid = pfn_to_nid(pfn);
  869. ret = memory_notify(MEM_GOING_ONLINE, &arg);
  870. ret = notifier_to_errno(ret);
  871. if (ret) {
  872. memory_notify(MEM_CANCEL_ONLINE, &arg);
  873. return ret;
  874. }
  875. /*
  876. * If this zone is not populated, then it is not in the zonelist.
  877. * This means the page allocator ignores this zone.
  878. * So, the zonelist must be updated after onlining.
  879. */
  880. mutex_lock(&zonelists_mutex);
  881. if (!populated_zone(zone)) {
  882. need_zonelists_rebuild = 1;
  883. build_all_zonelists(NULL, zone);
  884. }
  885. ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
  886. online_pages_range);
  887. if (ret) {
  888. if (need_zonelists_rebuild)
  889. zone_pcp_reset(zone);
  890. mutex_unlock(&zonelists_mutex);
  891. printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
  892. (unsigned long long) pfn << PAGE_SHIFT,
  893. (((unsigned long long) pfn + nr_pages)
  894. << PAGE_SHIFT) - 1);
  895. memory_notify(MEM_CANCEL_ONLINE, &arg);
  896. return ret;
  897. }
  898. zone->present_pages += onlined_pages;
  899. pgdat_resize_lock(zone->zone_pgdat, &flags);
  900. zone->zone_pgdat->node_present_pages += onlined_pages;
  901. pgdat_resize_unlock(zone->zone_pgdat, &flags);
  902. if (onlined_pages) {
  903. node_states_set_node(zone_to_nid(zone), &arg);
  904. if (need_zonelists_rebuild)
  905. build_all_zonelists(NULL, NULL);
  906. else
  907. zone_pcp_update(zone);
  908. }
  909. mutex_unlock(&zonelists_mutex);
  910. init_per_zone_wmark_min();
  911. if (onlined_pages)
  912. kswapd_run(zone_to_nid(zone));
  913. vm_total_pages = nr_free_pagecache_pages();
  914. writeback_set_ratelimit();
  915. if (onlined_pages)
  916. memory_notify(MEM_ONLINE, &arg);
  917. return 0;
  918. }
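/*
 * Editorial note: online_pages() is normally reached from userspace through
 * the memory sysfs interface, e.g.
 *
 *	echo online_movable > /sys/devices/system/memory/memory32/state
 *
 * which ends up in memory_block_change_state() with MMOP_ONLINE_MOVABLE
 * ("memory32" is just an example block name).
 */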
  919. #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
  920. static void reset_node_present_pages(pg_data_t *pgdat)
  921. {
  922. struct zone *z;
  923. for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
  924. z->present_pages = 0;
  925. pgdat->node_present_pages = 0;
  926. }
  927. /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
  928. static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
  929. {
  930. struct pglist_data *pgdat;
  931. unsigned long zones_size[MAX_NR_ZONES] = {0};
  932. unsigned long zholes_size[MAX_NR_ZONES] = {0};
  933. unsigned long start_pfn = PFN_DOWN(start);
  934. pgdat = NODE_DATA(nid);
  935. if (!pgdat) {
  936. pgdat = arch_alloc_nodedata(nid);
  937. if (!pgdat)
  938. return NULL;
  939. arch_refresh_nodedata(nid, pgdat);
  940. } else {
  941. /* Reset the nr_zones and classzone_idx to 0 before reuse */
  942. pgdat->nr_zones = 0;
  943. pgdat->classzone_idx = 0;
  944. }
  945. /* we can use NODE_DATA(nid) from here */
  946. /* init node's zones as empty zones, we don't have any present pages.*/
  947. free_area_init_node(nid, zones_size, start_pfn, zholes_size);
  948. /*
  949. * The node we allocated has no zone fallback lists. To avoid
  950. * accessing an uninitialized zonelist, build it here.
  951. */
  952. mutex_lock(&zonelists_mutex);
  953. build_all_zonelists(pgdat, NULL);
  954. mutex_unlock(&zonelists_mutex);
  955. /*
  956. * zone->managed_pages is set to an approximate value in
  957. * free_area_init_core(), which would cause
  958. * /sys/devices/system/node/nodeX/meminfo to report wrong data.
  959. * So reset it to 0 before any memory is onlined.
  960. */
  961. reset_node_managed_pages(pgdat);
  962. /*
  963. * When memory is hot-added, all the memory is in offline state. So
  964. * clear all zones' present_pages because they will be updated in
  965. * online_pages() and offline_pages().
  966. */
  967. reset_node_present_pages(pgdat);
  968. return pgdat;
  969. }
  970. static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
  971. {
  972. arch_refresh_nodedata(nid, NULL);
  973. arch_free_nodedata(pgdat);
  974. return;
  975. }
  976. /**
  977. * try_online_node - online a node if offlined
  978. *
  979. * called by cpu_up() to online a node without onlined memory.
  980. */
  981. int try_online_node(int nid)
  982. {
  983. pg_data_t *pgdat;
  984. int ret;
  985. if (node_online(nid))
  986. return 0;
  987. mem_hotplug_begin();
  988. pgdat = hotadd_new_pgdat(nid, 0);
  989. if (!pgdat) {
  990. pr_err("Cannot online node %d due to NULL pgdat\n", nid);
  991. ret = -ENOMEM;
  992. goto out;
  993. }
  994. node_set_online(nid);
  995. ret = register_one_node(nid);
  996. BUG_ON(ret);
  997. if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
  998. mutex_lock(&zonelists_mutex);
  999. build_all_zonelists(NULL, NULL);
  1000. mutex_unlock(&zonelists_mutex);
  1001. }
  1002. out:
  1003. mem_hotplug_done();
  1004. return ret;
  1005. }
  1006. static int check_hotplug_memory_range(u64 start, u64 size)
  1007. {
  1008. u64 start_pfn = PFN_DOWN(start);
  1009. u64 nr_pages = size >> PAGE_SHIFT;
  1010. /* Memory range must be aligned with section */
  1011. if ((start_pfn & ~PAGE_SECTION_MASK) ||
  1012. (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
  1013. pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
  1014. (unsigned long long)start,
  1015. (unsigned long long)size);
  1016. return -EINVAL;
  1017. }
  1018. return 0;
  1019. }
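/*
 * Editorial example: with 128 MiB sections (SECTION_SIZE_BITS == 27, as on
 * x86_64), start = 0x100000000 and size = 0x10000000 (256 MiB) pass this
 * check, while a size of 0x08001000 (128 MiB + 4 KiB) is rejected because it
 * is not a whole number of sections.
 */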
  1020. /*
  1021. * If the movable zone has already been set up, newly added memory should be checked:
  1022. * if its address is higher than the movable zone, it should be added as movable.
  1023. * Without this check, the movable zone may overlap with another zone.
  1024. */
  1025. static int should_add_memory_movable(int nid, u64 start, u64 size)
  1026. {
  1027. unsigned long start_pfn = start >> PAGE_SHIFT;
  1028. pg_data_t *pgdat = NODE_DATA(nid);
  1029. struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;
  1030. if (zone_is_empty(movable_zone))
  1031. return 0;
  1032. if (movable_zone->zone_start_pfn <= start_pfn)
  1033. return 1;
  1034. return 0;
  1035. }
  1036. int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
  1037. bool for_device)
  1038. {
  1039. #ifdef CONFIG_ZONE_DEVICE
  1040. if (for_device)
  1041. return ZONE_DEVICE;
  1042. #endif
  1043. if (should_add_memory_movable(nid, start, size))
  1044. return ZONE_MOVABLE;
  1045. return zone_default;
  1046. }
  1047. static int online_memory_block(struct memory_block *mem, void *arg)
  1048. {
  1049. return memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
  1050. }
  1051. /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
  1052. int __ref add_memory_resource(int nid, struct resource *res, bool online)
  1053. {
  1054. u64 start, size;
  1055. pg_data_t *pgdat = NULL;
  1056. bool new_pgdat;
  1057. bool new_node;
  1058. int ret;
  1059. start = res->start;
  1060. size = resource_size(res);
  1061. ret = check_hotplug_memory_range(start, size);
  1062. if (ret)
  1063. return ret;
  1064. { /* Stupid hack to suppress address-never-null warning */
  1065. void *p = NODE_DATA(nid);
  1066. new_pgdat = !p;
  1067. }
  1068. mem_hotplug_begin();
  1069. /*
  1070. * Add new range to memblock so that when hotadd_new_pgdat() is called
  1071. * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
  1072. * this new range and calculate total pages correctly. The range will
  1073. * be removed at hot-remove time.
  1074. */
  1075. memblock_add_node(start, size, nid);
  1076. new_node = !node_online(nid);
  1077. if (new_node) {
  1078. pgdat = hotadd_new_pgdat(nid, start);
  1079. ret = -ENOMEM;
  1080. if (!pgdat)
  1081. goto error;
  1082. }
  1083. /* call arch's memory hotadd */
  1084. ret = arch_add_memory(nid, start, size, false);
  1085. if (ret < 0)
  1086. goto error;
  1087. /* we online node here. we can't roll back from here. */
  1088. node_set_online(nid);
  1089. if (new_node) {
  1090. ret = register_one_node(nid);
  1091. /*
  1092. * If the sysfs file of the new node can't be created, CPUs on the node
  1093. * can't be hot-added. There is no way to roll back now,
  1094. * so check with BUG_ON() to catch it reluctantly.
  1095. */
  1096. BUG_ON(ret);
  1097. }
  1098. /* create new memmap entry */
  1099. firmware_map_add_hotplug(start, start + size, "System RAM");
  1100. /* online pages if requested */
  1101. if (online)
  1102. walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
  1103. NULL, online_memory_block);
  1104. goto out;
  1105. error:
  1106. /* rollback pgdat allocation and others */
  1107. if (new_pgdat)
  1108. rollback_node_hotadd(nid, pgdat);
  1109. memblock_remove(start, size);
  1110. out:
  1111. mem_hotplug_done();
  1112. return ret;
  1113. }
  1114. EXPORT_SYMBOL_GPL(add_memory_resource);
  1115. int __ref add_memory(int nid, u64 start, u64 size)
  1116. {
  1117. struct resource *res;
  1118. int ret;
  1119. res = register_memory_resource(start, size);
  1120. if (IS_ERR(res))
  1121. return PTR_ERR(res);
  1122. ret = add_memory_resource(nid, res, memhp_auto_online);
  1123. if (ret < 0)
  1124. release_memory_resource(res);
  1125. return ret;
  1126. }
  1127. EXPORT_SYMBOL_GPL(add_memory);
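/*
 * Usage sketch (editorial addition): a platform driver that discovers a new
 * memory range, e.g. the ACPI memory-device driver, hot-adds it roughly like
 * this (start_addr/length come from the firmware description):
 *
 *	ret = add_memory(nid, info->start_addr, info->length);
 *	if (ret && ret != -EEXIST)
 *		goto fail;
 */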
  1128. #ifdef CONFIG_MEMORY_HOTREMOVE
  1129. /*
  1130. * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
  1131. * set and the size of the free page is given by page_order(). Using this,
  1132. * the function determines if the pageblock contains only free pages.
  1133. * Due to buddy contraints, a free page at least the size of a pageblock will
  1134. * be located at the start of the pageblock
  1135. */
  1136. static inline int pageblock_free(struct page *page)
  1137. {
  1138. return PageBuddy(page) && page_order(page) >= pageblock_order;
  1139. }
  1140. /* Return the start of the next active pageblock after a given page */
  1141. static struct page *next_active_pageblock(struct page *page)
  1142. {
  1143. /* Ensure the starting page is pageblock-aligned */
  1144. BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
  1145. /* If the entire pageblock is free, move to the end of free page */
  1146. if (pageblock_free(page)) {
  1147. int order;
  1148. /* be careful. we don't have locks, page_order can be changed.*/
  1149. order = page_order(page);
  1150. if ((order < MAX_ORDER) && (order >= pageblock_order))
  1151. return page + (1 << order);
  1152. }
  1153. return page + pageblock_nr_pages;
  1154. }
  1155. /* Checks if this range of memory is likely to be hot-removable. */
  1156. int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
  1157. {
  1158. struct page *page = pfn_to_page(start_pfn);
  1159. struct page *end_page = page + nr_pages;
  1160. /* Check the starting page of each pageblock within the range */
  1161. for (; page < end_page; page = next_active_pageblock(page)) {
  1162. if (!is_pageblock_removable_nolock(page))
  1163. return 0;
  1164. cond_resched();
  1165. }
  1166. /* All pageblocks in the memory block are likely to be hot-removable */
  1167. return 1;
  1168. }
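/*
 * Editorial note: this check backs the "removable" sysfs attribute,
 * /sys/devices/system/memory/memoryN/removable, which userspace can read to
 * pick a likely hot-removable block before trying to offline it.
 */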
  1169. /*
  1170. * Confirm that all pages in the range [start, end) belong to the same zone.
  1171. */
  1172. int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
  1173. {
  1174. unsigned long pfn, sec_end_pfn;
  1175. struct zone *zone = NULL;
  1176. struct page *page;
  1177. int i;
  1178. for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
  1179. pfn < end_pfn;
  1180. pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
  1181. /* Make sure the memory section is present first */
  1182. if (!present_section_nr(pfn_to_section_nr(pfn)))
  1183. continue;
  1184. for (; pfn < sec_end_pfn && pfn < end_pfn;
  1185. pfn += MAX_ORDER_NR_PAGES) {
  1186. i = 0;
  1187. /* This is just a CONFIG_HOLES_IN_ZONE check.*/
  1188. while ((i < MAX_ORDER_NR_PAGES) &&
  1189. !pfn_valid_within(pfn + i))
  1190. i++;
  1191. if (i == MAX_ORDER_NR_PAGES)
  1192. continue;
  1193. page = pfn_to_page(pfn + i);
  1194. if (zone && page_zone(page) != zone)
  1195. return 0;
  1196. zone = page_zone(page);
  1197. }
  1198. }
  1199. return 1;
  1200. }
  1201. /*
  1202. * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
  1203. * and hugepages). We scan by pfn because it's much easier than scanning over a
  1204. * linked list. This function returns the pfn of the first movable page found,
  1205. * or 0 if none is found.
  1206. */
  1207. static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
  1208. {
  1209. unsigned long pfn;
  1210. struct page *page;
  1211. for (pfn = start; pfn < end; pfn++) {
  1212. if (pfn_valid(pfn)) {
  1213. page = pfn_to_page(pfn);
  1214. if (PageLRU(page))
  1215. return pfn;
  1216. if (PageHuge(page)) {
  1217. if (page_huge_active(page))
  1218. return pfn;
  1219. else
  1220. pfn = round_up(pfn + 1,
  1221. 1 << compound_order(page)) - 1;
  1222. }
  1223. }
  1224. }
  1225. return 0;
  1226. }
  1227. #define NR_OFFLINE_AT_ONCE_PAGES (256)
  1228. static int
  1229. do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
  1230. {
  1231. unsigned long pfn;
  1232. struct page *page;
  1233. int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
  1234. int not_managed = 0;
  1235. int ret = 0;
  1236. LIST_HEAD(source);
  1237. for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
  1238. if (!pfn_valid(pfn))
  1239. continue;
  1240. page = pfn_to_page(pfn);
  1241. if (PageHuge(page)) {
  1242. struct page *head = compound_head(page);
  1243. pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
  1244. if (compound_order(head) > PFN_SECTION_SHIFT) {
  1245. ret = -EBUSY;
  1246. break;
  1247. }
  1248. if (isolate_huge_page(page, &source))
  1249. move_pages -= 1 << compound_order(head);
  1250. continue;
  1251. }
  1252. if (!get_page_unless_zero(page))
  1253. continue;
  1254. /*
  1255. * We can skip free pages. And we can only deal with pages on
  1256. * LRU.
  1257. */
  1258. ret = isolate_lru_page(page);
  1259. if (!ret) { /* Success */
  1260. put_page(page);
  1261. list_add_tail(&page->lru, &source);
  1262. move_pages--;
  1263. inc_zone_page_state(page, NR_ISOLATED_ANON +
  1264. page_is_file_cache(page));
  1265. } else {
  1266. #ifdef CONFIG_DEBUG_VM
  1267. printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
  1268. pfn);
  1269. dump_page(page, "failed to remove from LRU");
  1270. #endif
  1271. put_page(page);
  1272. /* Because we don't hold zone->lock, we should
  1273. check this again here. */
  1274. if (page_count(page)) {
  1275. not_managed++;
  1276. ret = -EBUSY;
  1277. break;
  1278. }
  1279. }
  1280. }
  1281. if (!list_empty(&source)) {
  1282. if (not_managed) {
  1283. putback_movable_pages(&source);
  1284. goto out;
  1285. }
  1286. /*
  1287. * alloc_migrate_target should be improooooved!!
  1288. * migrate_pages returns # of failed pages.
  1289. */
  1290. ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
  1291. MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
  1292. if (ret)
  1293. putback_movable_pages(&source);
  1294. }
  1295. out:
  1296. return ret;
  1297. }
  1298. /*
  1299. * remove from free_area[] and mark all as Reserved.
  1300. */
  1301. static int
  1302. offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
  1303. void *data)
  1304. {
  1305. __offline_isolated_pages(start, start + nr_pages);
  1306. return 0;
  1307. }
  1308. static void
  1309. offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
  1310. {
  1311. walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
  1312. offline_isolated_pages_cb);
  1313. }
  1314. /*
  1315. * Check that all pages in the range, recorded as a memory resource, are isolated.
  1316. */
  1317. static int
  1318. check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
  1319. void *data)
  1320. {
  1321. int ret;
  1322. long offlined = *(long *)data;
  1323. ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
  1324. offlined = nr_pages;
  1325. if (!ret)
  1326. *(long *)data += offlined;
  1327. return ret;
  1328. }
  1329. static long
  1330. check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
  1331. {
  1332. long offlined = 0;
  1333. int ret;
  1334. ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
  1335. check_pages_isolated_cb);
  1336. if (ret < 0)
  1337. offlined = (long)ret;
  1338. return offlined;
  1339. }
  1340. #ifdef CONFIG_MOVABLE_NODE
  1341. /*
  1342. * When CONFIG_MOVABLE_NODE is set, we permit offlining of a node which doesn't have
  1343. * normal memory.
  1344. */
  1345. static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
  1346. {
  1347. return true;
  1348. }
  1349. #else /* CONFIG_MOVABLE_NODE */
  1350. /* ensure the node has NORMAL memory if it is still online */
  1351. static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
  1352. {
  1353. struct pglist_data *pgdat = zone->zone_pgdat;
  1354. unsigned long present_pages = 0;
  1355. enum zone_type zt;
  1356. for (zt = 0; zt <= ZONE_NORMAL; zt++)
  1357. present_pages += pgdat->node_zones[zt].present_pages;
  1358. if (present_pages > nr_pages)
  1359. return true;
  1360. present_pages = 0;
  1361. for (; zt <= ZONE_MOVABLE; zt++)
  1362. present_pages += pgdat->node_zones[zt].present_pages;
  1363. /*
  1364. * we can't offline the last normal memory until all
  1365. * higher memory is offlined.
  1366. */
  1367. return present_pages == 0;
  1368. }
  1369. #endif /* CONFIG_MOVABLE_NODE */
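
/*
 * Worked example for the !CONFIG_MOVABLE_NODE variant above (hypothetical
 * numbers): suppose the node has 1024 present pages in ZONE_DMA..ZONE_NORMAL
 * and 4096 present pages in the higher zones (ZONE_HIGHMEM/ZONE_MOVABLE).
 * Offlining nr_pages = 512 is allowed, since 1024 > 512 leaves some normal
 * memory behind.  Offlining nr_pages = 1024 would remove the last normal
 * memory, so it is only allowed once the 4096 higher-zone pages are already
 * offline, i.e. when their present_pages sum is zero.
 */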

static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_MOVABLE_NODE
        /*
         * Memory used by the kernel cannot be hot-removed because Linux
         * cannot migrate the kernel pages. When memory hotplug is
         * enabled, we should prevent memblock from allocating memory
         * for the kernel.
         *
         * ACPI SRAT records all hotpluggable memory ranges, but before
         * SRAT is parsed we don't know which ranges those are.
         *
         * The kernel image is loaded into memory very early, and we
         * cannot prevent that. So on NUMA systems, we mark any node the
         * kernel resides on as non-hotpluggable.
         *
         * Since a single node on a modern server can hold tens of
         * gigabytes of memory, we can assume the memory around the
         * kernel image is also non-hotpluggable. So before SRAT is
         * parsed, just allocate memory near the kernel image to do our
         * best to keep the kernel away from hotpluggable memory.
         */
        memblock_set_bottom_up(true);
        movable_node_enabled = true;
#else
        pr_warn("movable_node option not supported\n");
#endif
        return 0;
}
early_param("movable_node", cmdline_parse_movable_node);
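
/*
 * Usage sketch: "movable_node" is a boot-time parameter, so it is enabled
 * from the kernel command line, e.g. (bootloader entry shown purely as an
 * illustration):
 *
 *      linux /vmlinuz root=/dev/sda1 ro movable_node
 *
 * With CONFIG_MOVABLE_NODE set, this switches memblock to bottom-up
 * allocation (memblock_set_bottom_up(true)) before SRAT is parsed, as
 * described in the comment above; without that config option the parameter
 * is accepted but only prints a warning.
 */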

/* check which of the node_states will be changed when offlining memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
                struct zone *zone, struct memory_notify *arg)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long present_pages = 0;
        enum zone_type zt, zone_last = ZONE_NORMAL;

        /*
         * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_NORMAL,
         * so set zone_last to ZONE_NORMAL.
         *
         * If we have neither HIGHMEM nor a movable node,
         * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
         * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
         */
        if (N_MEMORY == N_NORMAL_MEMORY)
                zone_last = ZONE_MOVABLE;

        /*
         * Check whether node_states[N_NORMAL_MEMORY] will be changed.
         * If the memory to be offlined is in a zone of 0...zone_last,
         * and it is the last present memory there, 0...zone_last will
         * become empty after the offline, so we can determine that we
         * will need to clear the node from node_states[N_NORMAL_MEMORY].
         */
        for (zt = 0; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
                arg->status_change_nid_normal = zone_to_nid(zone);
        else
                arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
        /*
         * If we have a movable node, node_states[N_HIGH_MEMORY]
         * contains nodes which have zones of 0...ZONE_HIGHMEM,
         * so set zone_last to ZONE_HIGHMEM.
         *
         * If we don't have a movable node, node_states[N_HIGH_MEMORY]
         * contains nodes which have zones of 0...ZONE_MOVABLE,
         * so set zone_last to ZONE_MOVABLE.
         */
        zone_last = ZONE_HIGHMEM;
        if (N_MEMORY == N_HIGH_MEMORY)
                zone_last = ZONE_MOVABLE;

        for (; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
                arg->status_change_nid_high = zone_to_nid(zone);
        else
                arg->status_change_nid_high = -1;
#else
        arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

        /*
         * node_states[N_HIGH_MEMORY] contains nodes which have
         * 0...ZONE_MOVABLE.
         */
        zone_last = ZONE_MOVABLE;

        /*
         * Check whether node_states[N_HIGH_MEMORY] will be changed.
         * If we try to offline the last present @nr_pages from the node,
         * we can determine that we will need to clear the node from
         * node_states[N_HIGH_MEMORY].
         */
        for (; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (nr_pages >= present_pages)
                arg->status_change_nid = zone_to_nid(zone);
        else
                arg->status_change_nid = -1;
}
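
/*
 * Worked example (hypothetical numbers; assume a !CONFIG_HIGHMEM kernel
 * with movable-node support, so that N_MEMORY != N_NORMAL_MEMORY): the node
 * has 2048 present pages in 0...ZONE_NORMAL and 8192 in ZONE_MOVABLE, and we
 * offline 2048 pages that sit in ZONE_NORMAL.  The first check sees
 * nr_pages >= present_pages of 0...ZONE_NORMAL (2048 >= 2048), so
 * status_change_nid_normal is set to the node and N_NORMAL_MEMORY will have
 * to be cleared; with !CONFIG_HIGHMEM, status_change_nid_high simply mirrors
 * it.  The final check keeps accumulating up to ZONE_MOVABLE
 * (2048 + 8192 = 10240 pages); since 2048 < 10240 the node still has memory
 * afterwards, so status_change_nid stays -1 and N_MEMORY is left set.
 */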

static void node_states_clear_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_clear_state(node, N_NORMAL_MEMORY);

        if ((N_MEMORY != N_NORMAL_MEMORY) &&
            (arg->status_change_nid_high >= 0))
                node_clear_state(node, N_HIGH_MEMORY);

        if ((N_MEMORY != N_HIGH_MEMORY) &&
            (arg->status_change_nid >= 0))
                node_clear_state(node, N_MEMORY);
}

static int __ref __offline_pages(unsigned long start_pfn,
                unsigned long end_pfn, unsigned long timeout)
{
        unsigned long pfn, nr_pages, expire;
        long offlined_pages;
        int ret, drain, retry_max, node;
        unsigned long flags;
        struct zone *zone;
        struct memory_notify arg;

        /* at the very least, alignment to pageblock boundaries is necessary */
        if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
                return -EINVAL;
        if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
                return -EINVAL;
        /*
         * The whole range must lie within a single zone; this assumption
         * makes hotplug much easier and more readable. We rely on it for now.
         */
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return -EINVAL;

        zone = page_zone(pfn_to_page(start_pfn));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;

        if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
                return -EINVAL;

        /* set the above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE, true);
        if (ret)
                return ret;

        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_offline(nr_pages, zone, &arg);

        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_removal;

        pfn = start_pfn;
        expire = jiffies + timeout;
        drain = 0;
        retry_max = 5;
repeat:
        /* start memory hot removal */
        ret = -EAGAIN;
        if (time_after(jiffies, expire))
                goto failed_removal;
        ret = -EINTR;
        if (signal_pending(current))
                goto failed_removal;
        ret = 0;
        if (drain) {
                lru_add_drain_all();
                cond_resched();
                drain_all_pages(zone);
        }

        pfn = scan_movable_pages(start_pfn, end_pfn);
        if (pfn) { /* We have movable pages */
                ret = do_migrate_range(pfn, end_pfn);
                if (!ret) {
                        drain = 1;
                        goto repeat;
                } else {
                        if (ret < 0)
                                if (--retry_max == 0)
                                        goto failed_removal;
                        yield();
                        drain = 1;
                        goto repeat;
                }
        }
        /* drain all per-cpu LRU pagevecs; this is asynchronous */
        lru_add_drain_all();
        yield();
        /* drain the per-cpu (pcp) page lists; this is synchronous */
        drain_all_pages(zone);
        /*
         * Dissolve free hugepages in the memory block before actually
         * offlining it, to keep hugetlbfs's object counting consistent.
         */
        dissolve_free_huge_pages(start_pfn, end_pfn);
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0) {
                ret = -EBUSY;
                goto failed_removal;
        }
        printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
        /*
         * OK, all of our target pages are isolated.
         * We cannot roll back from this point on.
         */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pagetype flags and make the migrate type MOVABLE again */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        /* removal succeeded */
        adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
        zone->present_pages -= offlined_pages;

        pgdat_resize_lock(zone->zone_pgdat, &flags);
        zone->zone_pgdat->node_present_pages -= offlined_pages;
        pgdat_resize_unlock(zone->zone_pgdat, &flags);

        init_per_zone_wmark_min();

        if (!populated_zone(zone)) {
                zone_pcp_reset(zone);
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL, NULL);
                mutex_unlock(&zonelists_mutex);
        } else
                zone_pcp_update(zone);

        node_states_clear_node(node, &arg);
        if (arg.status_change_nid >= 0)
                kswapd_stop(node);

        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();

        memory_notify(MEM_OFFLINE, &arg);
        return 0;

failed_removal:
        printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
               (unsigned long long) start_pfn << PAGE_SHIFT,
               ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* push the isolated pages back to the free area */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        return ret;
}

/* Must be protected by mem_hotplug_begin() */
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}
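
/*
 * Sketch of how offline_pages() is reached in practice: userspace writes
 * "offline" to a memory block's sysfs state file, e.g.
 *
 *      # echo offline > /sys/devices/system/memory/memory32/state
 *
 * and the memory block device code then calls in here for that block's pfn
 * range.  A hypothetical in-kernel caller would follow the rule stated in
 * the comment above (illustrative only, error handling omitted):
 *
 *      mem_hotplug_begin();
 *      ret = offline_pages(start_pfn, nr_pages);
 *      mem_hotplug_done();
 */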
#endif /* CONFIG_MEMORY_HOTREMOVE */

/**
 * walk_memory_range - walk through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
 * @end_pfn: end pfn of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in the range
 * [start_pfn, end_pfn) and calls func on each mem section.
 *
 * Returns the return value of func.
 */
int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
                void *arg, int (*func)(struct memory_block *, void *))
{
        struct memory_block *mem = NULL;
        struct mem_section *section;
        unsigned long pfn, section_nr;
        int ret;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                section_nr = pfn_to_section_nr(pfn);
                if (!present_section_nr(section_nr))
                        continue;

                section = __nr_to_section(section_nr);
                /* same memblock? */
                if (mem)
                        if ((section_nr >= mem->start_section_nr) &&
                            (section_nr <= mem->end_section_nr))
                                continue;

                mem = find_memory_block_hinted(section, mem);
                if (!mem)
                        continue;

                ret = func(mem, arg);
                if (ret) {
                        kobject_put(&mem->dev.kobj);
                        return ret;
                }
        }

        if (mem)
                kobject_put(&mem->dev.kobj);

        return 0;
}
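
/*
 * Example of the callback shape walk_memory_range() expects.  The
 * hypothetical callback below just counts memory blocks that are currently
 * online (illustrative only):
 *
 *      static int count_online_memblock_cb(struct memory_block *mem, void *arg)
 *      {
 *              if (mem->state == MEM_ONLINE)
 *                      (*(unsigned long *)arg)++;
 *              return 0;       // 0 means "keep walking"
 *      }
 *
 *      unsigned long online = 0;
 *      walk_memory_range(start_pfn, end_pfn, &online,
 *                        count_online_memblock_cb);
 *
 * Returning non-zero stops the walk early and propagates that value, which
 * is what check_memblock_offlined_cb() below relies on when it finds a
 * block that is still online.
 */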

#ifdef CONFIG_MEMORY_HOTREMOVE
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
        int ret = !is_memblock_offlined(mem);

        if (unlikely(ret)) {
                phys_addr_t beginpa, endpa;

                beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
                endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
                pr_warn("removing memory failed because memory [%pa-%pa] is still online\n",
                        &beginpa, &endpa);
        }

        return ret;
}

static int check_cpu_on_node(pg_data_t *pgdat)
{
        int cpu;

        for_each_present_cpu(cpu) {
                if (cpu_to_node(cpu) == pgdat->node_id)
                        /*
                         * A CPU on this node has not been removed, so we
                         * can't offline this node.
                         */
                        return -EBUSY;
        }

        return 0;
}

static void unmap_cpu_on_node(pg_data_t *pgdat)
{
#ifdef CONFIG_ACPI_NUMA
        int cpu;

        for_each_possible_cpu(cpu)
                if (cpu_to_node(cpu) == pgdat->node_id)
                        numa_clear_node(cpu);
#endif
}

static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
{
        int ret;

        ret = check_cpu_on_node(pgdat);
        if (ret)
                return ret;

        /*
         * the node will be offlined when we come here, so we can clear
         * the cpu_to_node() now.
         */
        unmap_cpu_on_node(pgdat);
        return 0;
}

/**
 * try_offline_node
 *
 * Offline a node if all memory sections and cpus of the node have been
 * removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
        pg_data_t *pgdat = NODE_DATA(nid);
        unsigned long start_pfn = pgdat->node_start_pfn;
        unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
        unsigned long pfn;
        int i;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);

                if (!present_section_nr(section_nr))
                        continue;

                if (pfn_to_nid(pfn) != nid)
                        continue;

                /*
                 * Some memory sections of this node have not been removed,
                 * so we can't offline the node now.
                 */
                return;
        }

        if (check_and_unmap_cpu_on_node(pgdat))
                return;

        /*
         * All memory and CPUs of this node have been removed; we can
         * offline the node now.
         */
        node_set_offline(nid);
        unregister_one_node(nid);

        /* free the wait_table in each zone */
        for (i = 0; i < MAX_NR_ZONES; i++) {
                struct zone *zone = pgdat->node_zones + i;

                /*
                 * The wait_table may have been allocated from boot memory;
                 * only free it here if it was allocated by vmalloc.
                 */
                if (is_vmalloc_addr(zone->wait_table)) {
                        vfree(zone->wait_table);
                        zone->wait_table = NULL;
                }
        }
}
EXPORT_SYMBOL(try_offline_node);

/**
 * remove_memory
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __ref remove_memory(int nid, u64 start, u64 size)
{
        int ret;

        BUG_ON(check_hotplug_memory_range(start, size));

        mem_hotplug_begin();

        /*
         * All memory blocks must be offlined before removing memory.  Check
         * whether all memory blocks in question are offline and trigger a
         * BUG() if this is not the case.
         */
        ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
                                check_memblock_offlined_cb);
        if (ret)
                BUG();

        /* remove memmap entry */
        firmware_map_remove(start, start + size, "System RAM");

        memblock_free(start, size);
        memblock_remove(start, size);

        arch_remove_memory(start, size);

        try_offline_node(nid);

        mem_hotplug_done();
}
EXPORT_SYMBOL_GPL(remove_memory);
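
/*
 * Sketch of how a hot-remove path would use remove_memory() (illustrative
 * only; error handling omitted, and "info" is a stand-in for whatever
 * structure the caller uses to track the range).  The range must already be
 * fully offline, and the device hotplug lock must be held as required by
 * try_offline_node():
 *
 *      lock_device_hotplug();
 *      remove_memory(nid, info->start_addr, info->length);
 *      unlock_device_hotplug();
 *
 * remove_memory() itself BUG()s if any memory block in the range is still
 * online, so offlining must have succeeded beforehand.
 */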
#endif /* CONFIG_MEMORY_HOTREMOVE */