hotplug-memory.c

/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include "pseries.h"

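/*
 * Set while this file updates the ibm,dynamic-memory property itself so
 * that pseries_update_drconf_memory() can ignore the resulting
 * OF_RECONFIG_UPDATE_PROPERTY notification rather than acting on it twice.
 */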
static bool rtas_hp_event;

unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memoryblock and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

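/*
 * Allocate a copy of @prop whose value buffer is @prop_size bytes (possibly
 * larger than the original), copy the existing value into it, and mark the
 * new property OF_DYNAMIC so it is known to be dynamically allocated.
 */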
static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

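/*
 * Clone the ibm,dynamic-memory property and convert its header and LMB
 * entries from device-tree (big-endian) byte order to CPU byte order so
 * callers can work on the values directly.
 */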
static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
	struct property *prop, *new_prop;
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
	if (!prop)
		return NULL;

	new_prop = dlpar_clone_property(prop, prop->length);
	if (!new_prop)
		return NULL;

	/* Convert the property to cpu endian-ness */
	p = new_prop->value;
	*p = be32_to_cpu(*p);

	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
	}

	return new_prop;
}

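/*
 * Convert a cloned ibm,dynamic-memory property back to big-endian and write
 * it to the device tree.  rtas_hp_event is set around of_update_property()
 * so the memory notifier does not react to our own update.
 */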
static void dlpar_update_drconf_property(struct device_node *dn,
					 struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	/* Convert the property back to BE */
	p = prop->value;
	num_lmbs = *p;
	*p = cpu_to_be32(*p);
	p++;

	lmbs = (struct of_drconf_cell *)p;
	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
	}

	rtas_hp_event = true;
	of_update_property(dn, prop);
	rtas_hp_event = false;
}

static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
{
	struct device_node *dn;
	struct property *prop;
	struct of_drconf_cell *lmbs;
	u32 *p, num_lmbs;
	int i;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn)
		return -ENODEV;

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		of_node_put(dn);
		return -ENODEV;
	}

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == lmb->drc_index) {
			lmbs[i].flags = lmb->flags;
			lmbs[i].aa_index = lmb->aa_index;

			dlpar_update_drconf_property(dn, prop);
			break;
		}
	}

	of_node_put(dn);
	return 0;
}

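/*
 * Look up the index of @lmb_assoc in the ibm,associativity-lookup-arrays
 * property.  If no matching associativity array is found, the lookup array
 * is grown by one entry and the new associativity is appended to the end.
 */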
static u32 find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop, const u32 *lmb_assoc)
{
	u32 *assoc_arrays;
	u32 aa_index;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	aa_index = -1;
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		aa_index = i;
		break;
	}

	if (aa_index == -1) {
		struct property *new_prop;
		u32 new_prop_size;

		new_prop_size = ala_prop->length + aa_array_sz;
		new_prop = dlpar_clone_property(ala_prop, new_prop_size);
		if (!new_prop)
			return -1;

		assoc_arrays = new_prop->value;

		/* increment the number of entries in the lookup array */
		assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

		/* copy the new associativity into the lookup array */
		index = aa_arrays * aa_array_entries + 2;
		memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

		of_update_property(dr_node, new_prop);

		/*
		 * The associativity lookup array index for this lmb is
		 * number of entries - 1 since we added its associativity
		 * to the end of the lookup array.
		 */
		aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	}

	return aa_index;
}

static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);

	dlpar_free_cc_nodes(lmb_node);
	return aa_index;
}

static int dlpar_add_device_tree_lmb(struct of_drconf_cell *lmb)
{
	int aa_index;

	lmb->flags |= DRCONF_MEM_ASSIGNED;

	aa_index = lookup_lmb_associativity_index(lmb);
	if (aa_index < 0) {
		pr_err("Couldn't find associativity index for drc index %x\n",
		       lmb->drc_index);
		return aa_index;
	}

	lmb->aa_index = aa_index;
	return dlpar_update_device_tree_lmb(lmb);
}

static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
{
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
	lmb->aa_index = 0xffffffff;
	return dlpar_update_device_tree_lmb(lmb);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
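/*
 * Remove @memblock_size bytes of memory starting at @base: each memory
 * section in the range is torn down with remove_memory() and the range is
 * then dropped from the memblock reservations.
 */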
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

static bool lmb_is_removable(struct of_drconf_cell *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		if (!pfn_present(pfn))
			continue;

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct of_drconf_cell *);

static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

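/*
 * Hot-remove a single LMB: offline its memory block, remove the memory from
 * the kernel and from memblock, release its DRC and clear the LMB's
 * assignment in the device tree.
 */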
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	rc = device_offline(&mem_block->dev);
	put_device(&mem_block->dev);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_release_drc(lmb->drc_index);
	dlpar_remove_device_tree_lmb(lmb);

	return 0;
}

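/*
 * Hot-remove @lmbs_to_remove LMBs.  Each successfully removed LMB is marked
 * 'reserved'; if the full count cannot be removed, the reserved LMBs are
 * added back and the request fails with -EINVAL.
 */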
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	struct of_drconf_cell *lmbs;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	u32 num_lmbs, *p;
	int i, rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (lmb_is_removable(&lmbs[i]))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			continue;

		lmbs_removed++;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}

		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx was hot-removed\n",
				lmbs[i].base_addr);

			lmbs[i].reserved = 0;
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmb_found;
	int i, rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(&lmbs[i]);
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmbs[i].base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}

#endif /* CONFIG_MEMORY_HOTREMOVE */

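/*
 * Hot-add a single LMB: acquire its DRC, record its assignment and
 * associativity in the device tree, then add the memory to the kernel.
 * On failure the device tree update and the DRC acquisition are rolled back.
 */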
static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = dlpar_acquire_drc(lmb->drc_index);
	if (rc)
		return rc;

	rc = dlpar_add_device_tree_lmb(lmb);
	if (rc) {
		pr_err("Couldn't update device tree for drc index %x\n",
		       lmb->drc_index);
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		dlpar_remove_device_tree_lmb(lmb);
		dlpar_release_drc(lmb->drc_index);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int i, rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
		rc = dlpar_add_lmb(&lmbs[i]);
		if (rc)
			continue;

		lmbs_added++;

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_remove_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));
		}
		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmbs[i].base_addr, lmbs[i].drc_index);
			lmbs[i].reserved = 0;
		}
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, lmb_found;
	int rc;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_add_lmb(&lmbs[i]);
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmbs[i].base_addr, drc_index);

	return rc;
}

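/*
 * Handle a memory hotplug request: the hotplug error log specifies the
 * action (add or remove) and whether LMBs are identified by count or by
 * DRC index.  The work is done against a CPU-endian clone of the
 * ibm,dynamic-memory property.
 */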
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	struct device_node *dn;
	struct property *prop;
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_memory_add_by_count(count, prop);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_memory_add_by_index(drc_index, prop);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_memory_remove_by_count(count, prop);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_memory_remove_by_index(drc_index, prop);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	dlpar_free_property(prop);

dlpar_memory_out:
	of_node_put(dn);
	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

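/*
 * Handle an update of ibm,dynamic-memory that did not originate from this
 * file (rtas_hp_event is clear): compare old and new LMB flags and add or
 * remove the affected memblock region accordingly.
 */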
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
				memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			      DRCONF_MEM_ASSIGNED)) &&
			   (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}

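/*
 * OF reconfiguration notifier: keep memblock in sync when memory nodes are
 * attached or detached and when the ibm,dynamic-memory property changes.
 */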
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);