hotplug-memory.c

/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include "pseries.h"

static bool rtas_hp_event;

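/*
 * Return the size of a single memory block (LMB). On dynamic-reconfiguration
 * machines this comes from the "ibm,lmb-size" property; otherwise, on pseries,
 * it is inferred from the memory@ nodes, defaulting to MIN_MEMORY_BLOCK_SIZE.
 */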
unsigned long pseries_memory_block_size(void)
{
        struct device_node *np;
        unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
        struct resource r;

        np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (np) {
                const __be64 *size;

                size = of_get_property(np, "ibm,lmb-size", NULL);
                if (size)
                        memblock_size = be64_to_cpup(size);
                of_node_put(np);
        } else if (machine_is(pseries)) {
                /* This fallback really only applies to pseries */
                unsigned int memzero_size = 0;

                np = of_find_node_by_path("/memory@0");
                if (np) {
                        if (!of_address_to_resource(np, 0, &r))
                                memzero_size = resource_size(&r);
                        of_node_put(np);
                }

                if (memzero_size) {
                        /* We now know the size of memory@0, use this to find
                         * the first memoryblock and get its size.
                         */
                        char buf[64];

                        sprintf(buf, "/memory@%x", memzero_size);
                        np = of_find_node_by_path(buf);
                        if (np) {
                                if (!of_address_to_resource(np, 0, &r))
                                        memblock_size = resource_size(&r);
                                of_node_put(np);
                        }
                }
        }
        return memblock_size;
}

static void dlpar_free_drconf_property(struct property *prop)
{
        kfree(prop->name);
        kfree(prop->value);
        kfree(prop);
}

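/*
 * Clone the "ibm,dynamic-memory" property of @dn, converting the LMB count
 * and each of_drconf_cell entry to CPU endianness so the copy can be
 * modified in place. Returns NULL on allocation failure or if the property
 * does not exist.
 */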
static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
        struct property *prop, *new_prop;
        struct of_drconf_cell *lmbs;
        u32 num_lmbs, *p;
        int i;

        prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
        if (!prop)
                return NULL;

        new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
        if (!new_prop)
                return NULL;

        new_prop->name = kstrdup(prop->name, GFP_KERNEL);
        new_prop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
        if (!new_prop->name || !new_prop->value) {
                dlpar_free_drconf_property(new_prop);
                return NULL;
        }

        new_prop->length = prop->length;

        /* Convert the property to cpu endian-ness */
        p = new_prop->value;
        *p = be32_to_cpu(*p);

        num_lmbs = *p++;
        lmbs = (struct of_drconf_cell *)p;

        for (i = 0; i < num_lmbs; i++) {
                lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
                lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
                lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
        }

        return new_prop;
}

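/*
 * Map an LMB to the memory_block device that covers its base address,
 * going through the sparsemem section number.
 */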
static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
        unsigned long section_nr;
        struct mem_section *mem_sect;
        struct memory_block *mem_block;

        section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
        mem_sect = __nr_to_section(section_nr);

        mem_block = find_memory_block(mem_sect);
        return mem_block;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
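/*
 * Tear down the memory block at @base section by section and remove the
 * range from the memblock memory regions.
 */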
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
        unsigned long block_sz, start_pfn;
        int sections_per_block;
        int i, nid;

        start_pfn = base >> PAGE_SHIFT;

        lock_device_hotplug();

        if (!pfn_valid(start_pfn))
                goto out;

        block_sz = pseries_memory_block_size();
        sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
        nid = memory_add_physaddr_to_nid(base);

        for (i = 0; i < sections_per_block; i++) {
                remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
                base += MIN_MEMORY_BLOCK_SIZE;
        }

out:
        /* Update memory regions for memory remove */
        memblock_remove(base, memblock_size);
        unlock_device_hotplug();
        return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
        const char *type;
        const __be32 *regs;
        unsigned long base;
        unsigned int lmb_size;
        int ret = -EINVAL;

        /*
         * Check to see if we are actually removing memory
         */
        type = of_get_property(np, "device_type", NULL);
        if (type == NULL || strcmp(type, "memory") != 0)
                return 0;

        /*
         * Find the base address and size of the memblock
         */
        regs = of_get_property(np, "reg", NULL);
        if (!regs)
                return ret;

        base = be64_to_cpu(*(unsigned long *)regs);
        lmb_size = be32_to_cpu(regs[3]);

        pseries_remove_memblock(base, lmb_size);
        return 0;
}

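/*
 * An LMB is removable only if it is currently assigned and every memory
 * section it covers reports itself as removable.
 */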
static bool lmb_is_removable(struct of_drconf_cell *lmb)
{
        int i, scns_per_block;
        int rc = 1;
        unsigned long pfn, block_sz;
        u64 phys_addr;

        if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
                return false;

        block_sz = memory_block_size_bytes();
        scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
        phys_addr = lmb->base_addr;

        for (i = 0; i < scns_per_block; i++) {
                pfn = PFN_DOWN(phys_addr);
                if (!pfn_present(pfn))
                        continue;

                rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
                phys_addr += MIN_MEMORY_BLOCK_SIZE;
        }

        return rc ? true : false;
}

static int dlpar_add_lmb(struct of_drconf_cell *);

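/*
 * Offline and remove a single LMB: take the backing memory block offline,
 * remove the memory and its memblock region, then release the DRC so the
 * hypervisor can reclaim it.
 */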
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
        struct memory_block *mem_block;
        unsigned long block_sz;
        int nid, rc;

        if (!lmb_is_removable(lmb))
                return -EINVAL;

        mem_block = lmb_to_memblock(lmb);
        if (!mem_block)
                return -EINVAL;

        rc = device_offline(&mem_block->dev);
        put_device(&mem_block->dev);
        if (rc)
                return rc;

        block_sz = pseries_memory_block_size();
        nid = memory_add_physaddr_to_nid(lmb->base_addr);

        remove_memory(nid, lmb->base_addr, block_sz);

        /* Update memory regions for memory remove */
        memblock_remove(lmb->base_addr, block_sz);

        dlpar_release_drc(lmb->drc_index);

        lmb->flags &= ~DRCONF_MEM_ASSIGNED;
        return 0;
}

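/*
 * Hot-remove @lmbs_to_remove LMBs, picking any assigned entries from the
 * cloned property. If the full count cannot be removed, every LMB removed
 * so far is added back and -EINVAL is returned.
 */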
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
                                        struct property *prop)
{
        struct of_drconf_cell *lmbs;
        int lmbs_removed = 0;
        int lmbs_available = 0;
        u32 num_lmbs, *p;
        int i, rc;

        pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

        if (lmbs_to_remove == 0)
                return -EINVAL;

        p = prop->value;
        num_lmbs = *p++;
        lmbs = (struct of_drconf_cell *)p;

        /* Validate that there are enough LMBs to satisfy the request */
        for (i = 0; i < num_lmbs; i++) {
                if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
                        lmbs_available++;
        }

        if (lmbs_available < lmbs_to_remove)
                return -EINVAL;

        for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
                rc = dlpar_remove_lmb(&lmbs[i]);
                if (rc)
                        continue;

                lmbs_removed++;

                /* Mark this lmb so we can add it later if all of the
                 * requested LMBs cannot be removed.
                 */
                lmbs[i].reserved = 1;
        }

        if (lmbs_removed != lmbs_to_remove) {
                pr_err("Memory hot-remove failed, adding LMB's back\n");

                for (i = 0; i < num_lmbs; i++) {
                        if (!lmbs[i].reserved)
                                continue;

                        rc = dlpar_add_lmb(&lmbs[i]);
                        if (rc)
                                pr_err("Failed to add LMB back, drc index %x\n",
                                       lmbs[i].drc_index);

                        lmbs[i].reserved = 0;
                }

                rc = -EINVAL;
        } else {
                for (i = 0; i < num_lmbs; i++) {
                        if (!lmbs[i].reserved)
                                continue;

                        pr_info("Memory at %llx was hot-removed\n",
                                lmbs[i].base_addr);

                        lmbs[i].reserved = 0;
                }
                rc = 0;
        }

        return rc;
}

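/* Hot-remove the single LMB identified by @drc_index, if it exists. */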
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
        struct of_drconf_cell *lmbs;
        u32 num_lmbs, *p;
        int lmb_found;
        int i, rc;

        pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

        p = prop->value;
        num_lmbs = *p++;
        lmbs = (struct of_drconf_cell *)p;

        lmb_found = 0;
        for (i = 0; i < num_lmbs; i++) {
                if (lmbs[i].drc_index == drc_index) {
                        lmb_found = 1;
                        rc = dlpar_remove_lmb(&lmbs[i]);
                        break;
                }
        }

        if (!lmb_found)
                rc = -EINVAL;

        if (rc)
                pr_info("Failed to hot-remove memory at %llx\n",
                        lmbs[i].base_addr);
        else
                pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);

        return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
                                          unsigned int memblock_size)
{
        return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
        return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
        return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
        return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
                                        struct property *prop)
{
        return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

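/*
 * Hot-add a single LMB: acquire its DRC, add and online the memory, and
 * record the range in memblock. Any failure rolls back the earlier steps
 * and releases the DRC.
 */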
static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
        struct memory_block *mem_block;
        unsigned long block_sz;
        int nid, rc;

        if (lmb->flags & DRCONF_MEM_ASSIGNED)
                return -EINVAL;

        block_sz = memory_block_size_bytes();

        rc = dlpar_acquire_drc(lmb->drc_index);
        if (rc)
                return rc;

        /* Find the node id for this address */
        nid = memory_add_physaddr_to_nid(lmb->base_addr);

        /* Add the memory */
        rc = add_memory(nid, lmb->base_addr, block_sz);
        if (rc) {
                dlpar_release_drc(lmb->drc_index);
                return rc;
        }

        /* Register this block of memory */
        rc = memblock_add(lmb->base_addr, block_sz);
        if (rc) {
                remove_memory(nid, lmb->base_addr, block_sz);
                dlpar_release_drc(lmb->drc_index);
                return rc;
        }

        mem_block = lmb_to_memblock(lmb);
        if (!mem_block) {
                remove_memory(nid, lmb->base_addr, block_sz);
                dlpar_release_drc(lmb->drc_index);
                return -EINVAL;
        }

        rc = device_online(&mem_block->dev);
        put_device(&mem_block->dev);
        if (rc) {
                remove_memory(nid, lmb->base_addr, block_sz);
                dlpar_release_drc(lmb->drc_index);
                return rc;
        }

        lmb->flags |= DRCONF_MEM_ASSIGNED;
        return 0;
}

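/*
 * Hot-add @lmbs_to_add LMBs from the unassigned entries in the cloned
 * property. If the full count cannot be added, the LMBs added so far are
 * removed again and -EINVAL is returned.
 */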
static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
{
        struct of_drconf_cell *lmbs;
        u32 num_lmbs, *p;
        int lmbs_available = 0;
        int lmbs_added = 0;
        int i, rc;

        pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

        if (lmbs_to_add == 0)
                return -EINVAL;

        p = prop->value;
        num_lmbs = *p++;
        lmbs = (struct of_drconf_cell *)p;

        /* Validate that there are enough LMBs to satisfy the request */
        for (i = 0; i < num_lmbs; i++) {
                if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
                        lmbs_available++;
        }

        if (lmbs_available < lmbs_to_add)
                return -EINVAL;

        for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
                rc = dlpar_add_lmb(&lmbs[i]);
                if (rc)
                        continue;

                lmbs_added++;

                /* Mark this lmb so we can remove it later if all of the
                 * requested LMBs cannot be added.
                 */
                lmbs[i].reserved = 1;
        }

        if (lmbs_added != lmbs_to_add) {
                pr_err("Memory hot-add failed, removing any added LMBs\n");

                for (i = 0; i < num_lmbs; i++) {
                        if (!lmbs[i].reserved)
                                continue;

                        rc = dlpar_remove_lmb(&lmbs[i]);
                        if (rc)
                                pr_err("Failed to remove LMB, drc index %x\n",
                                       be32_to_cpu(lmbs[i].drc_index));
                }
                rc = -EINVAL;
        } else {
                for (i = 0; i < num_lmbs; i++) {
                        if (!lmbs[i].reserved)
                                continue;

                        pr_info("Memory at %llx (drc index %x) was hot-added\n",
                                lmbs[i].base_addr, lmbs[i].drc_index);
                        lmbs[i].reserved = 0;
                }
        }

        return rc;
}

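/* Hot-add the single LMB identified by @drc_index, if it exists. */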
static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
{
        struct of_drconf_cell *lmbs;
        u32 num_lmbs, *p;
        int i, lmb_found;
        int rc;

        pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

        p = prop->value;
        num_lmbs = *p++;
        lmbs = (struct of_drconf_cell *)p;

        lmb_found = 0;
        for (i = 0; i < num_lmbs; i++) {
                if (lmbs[i].drc_index == drc_index) {
                        lmb_found = 1;
                        rc = dlpar_add_lmb(&lmbs[i]);
                        break;
                }
        }

        if (!lmb_found)
                rc = -EINVAL;

        if (rc)
                pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
        else
                pr_info("Memory at %llx (drc index %x) was hot-added\n",
                        lmbs[i].base_addr, drc_index);

        return rc;
}

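/*
 * Convert the modified LMB array back to big endian and write it to the
 * device tree. rtas_hp_event is set around the update so that the
 * OF_RECONFIG notifier below ignores property changes we generated
 * ourselves.
 */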
static void dlpar_update_drconf_property(struct device_node *dn,
                                         struct property *prop)
{
        struct of_drconf_cell *lmbs;
        u32 num_lmbs, *p;
        int i;

        /* Convert the property back to BE */
        p = prop->value;
        num_lmbs = *p;
        *p = cpu_to_be32(*p);
        p++;

        lmbs = (struct of_drconf_cell *)p;
        for (i = 0; i < num_lmbs; i++) {
                lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
                lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
                lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
        }

        rtas_hp_event = true;
        of_update_property(dn, prop);
        rtas_hp_event = false;
}

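/*
 * Entry point for RTAS hotplug memory events. Clone the dynamic-memory
 * property, dispatch the add or remove request by count or DRC index, and
 * either write the updated property back or free the clone on failure.
 */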
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
        struct device_node *dn;
        struct property *prop;
        u32 count, drc_index;
        int rc;

        count = hp_elog->_drc_u.drc_count;
        drc_index = hp_elog->_drc_u.drc_index;

        lock_device_hotplug();

        dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (!dn) {
                rc = -EINVAL;
                goto dlpar_memory_out;
        }

        prop = dlpar_clone_drconf_property(dn);
        if (!prop) {
                rc = -EINVAL;
                goto dlpar_memory_out;
        }

        switch (hp_elog->action) {
        case PSERIES_HP_ELOG_ACTION_ADD:
                if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
                        rc = dlpar_memory_add_by_count(count, prop);
                else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
                        rc = dlpar_memory_add_by_index(drc_index, prop);
                else
                        rc = -EINVAL;
                break;
        case PSERIES_HP_ELOG_ACTION_REMOVE:
                if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
                        rc = dlpar_memory_remove_by_count(count, prop);
                else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
                        rc = dlpar_memory_remove_by_index(drc_index, prop);
                else
                        rc = -EINVAL;
                break;
        default:
                pr_err("Invalid action (%d) specified\n", hp_elog->action);
                rc = -EINVAL;
                break;
        }

        if (rc)
                dlpar_free_drconf_property(prop);
        else
                dlpar_update_drconf_property(dn, prop);

dlpar_memory_out:
        of_node_put(dn);
        unlock_device_hotplug();
        return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
        const char *type;
        const __be32 *regs;
        unsigned long base;
        unsigned int lmb_size;
        int ret = -EINVAL;

        /*
         * Check to see if we are actually adding memory
         */
        type = of_get_property(np, "device_type", NULL);
        if (type == NULL || strcmp(type, "memory") != 0)
                return 0;

        /*
         * Find the base and size of the memblock
         */
        regs = of_get_property(np, "reg", NULL);
        if (!regs)
                return ret;

        base = be64_to_cpu(*(unsigned long *)regs);
        lmb_size = be32_to_cpu(regs[3]);

        /*
         * Update memory region to represent the memory add
         */
        ret = memblock_add(base, lmb_size);
        return (ret < 0) ? -EINVAL : 0;
}

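/*
 * Handle an update to the "ibm,dynamic-memory" property made outside the
 * RTAS hotplug path: compare old and new LMB flags and add or remove the
 * first LMB whose DRCONF_MEM_ASSIGNED bit changed.
 */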
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
        struct of_drconf_cell *new_drmem, *old_drmem;
        unsigned long memblock_size;
        u32 entries;
        __be32 *p;
        int i, rc = -EINVAL;

        if (rtas_hp_event)
                return 0;

        memblock_size = pseries_memory_block_size();
        if (!memblock_size)
                return -EINVAL;

        p = (__be32 *) pr->old_prop->value;
        if (!p)
                return -EINVAL;

        /* The first int of the property is the number of lmb's described
         * by the property. This is followed by an array of of_drconf_cell
         * entries. Get the number of entries and skip to the array of
         * of_drconf_cell's.
         */
        entries = be32_to_cpu(*p++);
        old_drmem = (struct of_drconf_cell *)p;

        p = (__be32 *)pr->prop->value;
        p++;
        new_drmem = (struct of_drconf_cell *)p;

        for (i = 0; i < entries; i++) {
                if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
                    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
                        rc = pseries_remove_memblock(
                                be64_to_cpu(old_drmem[i].base_addr),
                                memblock_size);
                        break;
                } else if ((!(be32_to_cpu(old_drmem[i].flags) &
                            DRCONF_MEM_ASSIGNED)) &&
                           (be32_to_cpu(new_drmem[i].flags) &
                            DRCONF_MEM_ASSIGNED)) {
                        rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
                                          memblock_size);
                        rc = (rc < 0) ? -EINVAL : 0;
                        break;
                }
        }
        return rc;
}

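/*
 * OF reconfiguration notifier: keep memblock and the hotplug state in sync
 * when memory nodes are attached or detached, or when the dynamic-memory
 * property is updated.
 */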
static int pseries_memory_notifier(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        struct of_reconfig_data *rd = data;
        int err = 0;

        switch (action) {
        case OF_RECONFIG_ATTACH_NODE:
                err = pseries_add_mem_node(rd->dn);
                break;
        case OF_RECONFIG_DETACH_NODE:
                err = pseries_remove_mem_node(rd->dn);
                break;
        case OF_RECONFIG_UPDATE_PROPERTY:
                if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
                        err = pseries_update_drconf_memory(rd);
                break;
        }
        return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
        .notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
        if (firmware_has_feature(FW_FEATURE_LPAR))
                of_reconfig_notifier_register(&pseries_mem_nb);

        return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);