hotplug-memory.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898
  1. /*
  2. * pseries Memory Hotplug infrastructure.
  3. *
  4. * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
  12. #include <linux/of.h>
  13. #include <linux/of_address.h>
  14. #include <linux/memblock.h>
  15. #include <linux/memory.h>
  16. #include <linux/memory_hotplug.h>
  17. #include <linux/slab.h>
  18. #include <asm/firmware.h>
  19. #include <asm/machdep.h>
  20. #include <asm/prom.h>
  21. #include <asm/sparsemem.h>
  22. #include "pseries.h"
/*
 * Set while dlpar_update_drconf_property() pushes our own update of the
 * "ibm,dynamic-memory" property, so that the OF reconfig notifier path
 * (pseries_update_drconf_memory()) can ignore the self-inflicted event.
 */
static bool rtas_hp_event;
  24. unsigned long pseries_memory_block_size(void)
  25. {
  26. struct device_node *np;
  27. unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
  28. struct resource r;
  29. np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
  30. if (np) {
  31. const __be64 *size;
  32. size = of_get_property(np, "ibm,lmb-size", NULL);
  33. if (size)
  34. memblock_size = be64_to_cpup(size);
  35. of_node_put(np);
  36. } else if (machine_is(pseries)) {
  37. /* This fallback really only applies to pseries */
  38. unsigned int memzero_size = 0;
  39. np = of_find_node_by_path("/memory@0");
  40. if (np) {
  41. if (!of_address_to_resource(np, 0, &r))
  42. memzero_size = resource_size(&r);
  43. of_node_put(np);
  44. }
  45. if (memzero_size) {
  46. /* We now know the size of memory@0, use this to find
  47. * the first memoryblock and get its size.
  48. */
  49. char buf[64];
  50. sprintf(buf, "/memory@%x", memzero_size);
  51. np = of_find_node_by_path(buf);
  52. if (np) {
  53. if (!of_address_to_resource(np, 0, &r))
  54. memblock_size = resource_size(&r);
  55. of_node_put(np);
  56. }
  57. }
  58. }
  59. return memblock_size;
  60. }
  61. static void dlpar_free_property(struct property *prop)
  62. {
  63. kfree(prop->name);
  64. kfree(prop->value);
  65. kfree(prop);
  66. }
  67. static struct property *dlpar_clone_property(struct property *prop,
  68. u32 prop_size)
  69. {
  70. struct property *new_prop;
  71. new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
  72. if (!new_prop)
  73. return NULL;
  74. new_prop->name = kstrdup(prop->name, GFP_KERNEL);
  75. new_prop->value = kzalloc(prop_size, GFP_KERNEL);
  76. if (!new_prop->name || !new_prop->value) {
  77. dlpar_free_property(new_prop);
  78. return NULL;
  79. }
  80. memcpy(new_prop->value, prop->value, prop->length);
  81. new_prop->length = prop_size;
  82. of_property_set_flag(new_prop, OF_DYNAMIC);
  83. return new_prop;
  84. }
/*
 * Clone the "ibm,dynamic-memory" property of @dn and convert the
 * clone's contents to CPU endianness so callers can operate on it
 * directly.
 *
 * Layout: a 32-bit LMB count followed by an array of
 * struct of_drconf_cell.  Only base_addr, drc_index and flags are
 * byte-swapped here; dlpar_update_drconf_property() performs the
 * inverse conversion before the property is written back.
 *
 * Returns the CPU-endian clone (caller frees via dlpar_free_property()
 * or hands it to of_update_property()), or NULL if the property is
 * missing or allocation fails.
 */
static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
	struct property *prop, *new_prop;
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
	if (!prop)
		return NULL;

	new_prop = dlpar_clone_property(prop, prop->length);
	if (!new_prop)
		return NULL;

	/* Convert the property to cpu endian-ness */
	p = new_prop->value;
	*p = be32_to_cpu(*p);

	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
	}

	return new_prop;
}
/*
 * Convert a CPU-endian drconf property (produced by
 * dlpar_clone_drconf_property()) back to big-endian and install it on
 * @dn via of_update_property().
 *
 * rtas_hp_event is raised around the update so that the OF reconfig
 * notifier ignores this self-generated property change.
 */
static void dlpar_update_drconf_property(struct device_node *dn,
					 struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	/* Convert the property back to BE */
	p = prop->value;
	num_lmbs = *p;		/* read the count before byte-swapping it */
	*p = cpu_to_be32(*p);
	p++;

	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
	}

	rtas_hp_event = true;
	of_update_property(dn, prop);
	rtas_hp_event = false;
}
  130. static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
  131. {
  132. struct device_node *dn;
  133. struct property *prop;
  134. struct of_drconf_cell *lmbs;
  135. u32 *p, num_lmbs;
  136. int i;
  137. dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
  138. if (!dn)
  139. return -ENODEV;
  140. prop = dlpar_clone_drconf_property(dn);
  141. if (!prop) {
  142. of_node_put(dn);
  143. return -ENODEV;
  144. }
  145. p = prop->value;
  146. num_lmbs = *p++;
  147. lmbs = (struct of_drconf_cell *)p;
  148. for (i = 0; i < num_lmbs; i++) {
  149. if (lmbs[i].drc_index == lmb->drc_index) {
  150. lmbs[i].flags = lmb->flags;
  151. lmbs[i].aa_index = lmb->aa_index;
  152. dlpar_update_drconf_property(dn, prop);
  153. break;
  154. }
  155. }
  156. of_node_put(dn);
  157. return 0;
  158. }
  159. static u32 find_aa_index(struct device_node *dr_node,
  160. struct property *ala_prop, const u32 *lmb_assoc)
  161. {
  162. u32 *assoc_arrays;
  163. u32 aa_index;
  164. int aa_arrays, aa_array_entries, aa_array_sz;
  165. int i, index;
  166. /*
  167. * The ibm,associativity-lookup-arrays property is defined to be
  168. * a 32-bit value specifying the number of associativity arrays
  169. * followed by a 32-bitvalue specifying the number of entries per
  170. * array, followed by the associativity arrays.
  171. */
  172. assoc_arrays = ala_prop->value;
  173. aa_arrays = be32_to_cpu(assoc_arrays[0]);
  174. aa_array_entries = be32_to_cpu(assoc_arrays[1]);
  175. aa_array_sz = aa_array_entries * sizeof(u32);
  176. aa_index = -1;
  177. for (i = 0; i < aa_arrays; i++) {
  178. index = (i * aa_array_entries) + 2;
  179. if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
  180. continue;
  181. aa_index = i;
  182. break;
  183. }
  184. if (aa_index == -1) {
  185. struct property *new_prop;
  186. u32 new_prop_size;
  187. new_prop_size = ala_prop->length + aa_array_sz;
  188. new_prop = dlpar_clone_property(ala_prop, new_prop_size);
  189. if (!new_prop)
  190. return -1;
  191. assoc_arrays = new_prop->value;
  192. /* increment the number of entries in the lookup array */
  193. assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
  194. /* copy the new associativity into the lookup array */
  195. index = aa_arrays * aa_array_entries + 2;
  196. memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
  197. of_update_property(dr_node, new_prop);
  198. /*
  199. * The associativity lookup array index for this lmb is
  200. * number of entries - 1 since we added its associativity
  201. * to the end of the lookup array.
  202. */
  203. aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
  204. }
  205. return aa_index;
  206. }
  207. static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
  208. {
  209. struct device_node *parent, *lmb_node, *dr_node;
  210. struct property *ala_prop;
  211. const u32 *lmb_assoc;
  212. u32 aa_index;
  213. parent = of_find_node_by_path("/");
  214. if (!parent)
  215. return -ENODEV;
  216. lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
  217. parent);
  218. of_node_put(parent);
  219. if (!lmb_node)
  220. return -EINVAL;
  221. lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
  222. if (!lmb_assoc) {
  223. dlpar_free_cc_nodes(lmb_node);
  224. return -ENODEV;
  225. }
  226. dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
  227. if (!dr_node) {
  228. dlpar_free_cc_nodes(lmb_node);
  229. return -ENODEV;
  230. }
  231. ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
  232. NULL);
  233. if (!ala_prop) {
  234. of_node_put(dr_node);
  235. dlpar_free_cc_nodes(lmb_node);
  236. return -ENODEV;
  237. }
  238. aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);
  239. dlpar_free_cc_nodes(lmb_node);
  240. return aa_index;
  241. }
  242. static int dlpar_add_device_tree_lmb(struct of_drconf_cell *lmb)
  243. {
  244. int aa_index;
  245. lmb->flags |= DRCONF_MEM_ASSIGNED;
  246. aa_index = lookup_lmb_associativity_index(lmb);
  247. if (aa_index < 0) {
  248. pr_err("Couldn't find associativity index for drc index %x\n",
  249. lmb->drc_index);
  250. return aa_index;
  251. }
  252. lmb->aa_index = aa_index;
  253. return dlpar_update_device_tree_lmb(lmb);
  254. }
  255. static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
  256. {
  257. lmb->flags &= ~DRCONF_MEM_ASSIGNED;
  258. lmb->aa_index = 0xffffffff;
  259. return dlpar_update_device_tree_lmb(lmb);
  260. }
  261. #ifdef CONFIG_MEMORY_HOTREMOVE
  262. static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
  263. {
  264. unsigned long block_sz, start_pfn;
  265. int sections_per_block;
  266. int i, nid;
  267. start_pfn = base >> PAGE_SHIFT;
  268. lock_device_hotplug();
  269. if (!pfn_valid(start_pfn))
  270. goto out;
  271. block_sz = pseries_memory_block_size();
  272. sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
  273. nid = memory_add_physaddr_to_nid(base);
  274. for (i = 0; i < sections_per_block; i++) {
  275. remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
  276. base += MIN_MEMORY_BLOCK_SIZE;
  277. }
  278. out:
  279. /* Update memory regions for memory remove */
  280. memblock_remove(base, memblock_size);
  281. unlock_device_hotplug();
  282. return 0;
  283. }
/*
 * OF reconfig helper: when the detached node @np is a memory node,
 * parse its "reg" property and remove the described memory block.
 *
 * Returns 0 when the node is not memory or was handled, -EINVAL if the
 * "reg" property is missing.
 */
static int pseries_remove_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	/*
	 * 64-bit base address followed by the size; the size's low 32
	 * bits are read from regs[3].  NOTE(review): assumes 2 address
	 * cells + 2 size cells — confirm against #address-cells.
	 */
	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}
  308. static bool lmb_is_removable(struct of_drconf_cell *lmb)
  309. {
  310. int i, scns_per_block;
  311. int rc = 1;
  312. unsigned long pfn, block_sz;
  313. u64 phys_addr;
  314. if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
  315. return false;
  316. block_sz = memory_block_size_bytes();
  317. scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
  318. phys_addr = lmb->base_addr;
  319. for (i = 0; i < scns_per_block; i++) {
  320. pfn = PFN_DOWN(phys_addr);
  321. if (!pfn_present(pfn))
  322. continue;
  323. rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
  324. phys_addr += MIN_MEMORY_BLOCK_SIZE;
  325. }
  326. return rc ? true : false;
  327. }
  328. static int dlpar_add_lmb(struct of_drconf_cell *);
  329. static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
  330. {
  331. unsigned long section_nr;
  332. struct mem_section *mem_sect;
  333. struct memory_block *mem_block;
  334. section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
  335. mem_sect = __nr_to_section(section_nr);
  336. mem_block = find_memory_block(mem_sect);
  337. return mem_block;
  338. }
/*
 * Hot-remove a single LMB: offline its memory_block device, tear down
 * the memory and memblock region, release the DRC and clear the LMB's
 * device tree entry.
 *
 * Returns 0 on success, -EINVAL if the LMB is not removable or has no
 * memory_block, or the device_offline() error code.
 */
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	/* Drop the device reference whether or not offlining worked. */
	rc = device_offline(&mem_block->dev);
	put_device(&mem_block->dev);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_release_drc(lmb->drc_index);
	dlpar_remove_device_tree_lmb(lmb);

	return 0;
}
/*
 * Hot-remove @lmbs_to_remove LMBs chosen from the drconf property
 * @prop (already converted to CPU endianness).
 *
 * All-or-nothing: if fewer than the requested number can be removed,
 * every LMB removed by this call is added back and -EINVAL is
 * returned.  The per-entry 'reserved' field is used as scratch space
 * to track which entries this call removed.
 */
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	struct of_drconf_cell *lmbs;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	u32 num_lmbs, *p;
	int i, rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
		/* Individual failures are tolerated; try the next LMB. */
		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			continue;

		lmbs_removed++;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		/* Roll back: re-add everything this call removed. */
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}

		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx was hot-removed\n",
				lmbs[i].base_addr);

			lmbs[i].reserved = 0;
		}
		rc = 0;
	}

	return rc;
}
  417. static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
  418. {
  419. struct of_drconf_cell *lmbs;
  420. u32 num_lmbs, *p;
  421. int lmb_found;
  422. int i, rc;
  423. pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
  424. p = prop->value;
  425. num_lmbs = *p++;
  426. lmbs = (struct of_drconf_cell *)p;
  427. lmb_found = 0;
  428. for (i = 0; i < num_lmbs; i++) {
  429. if (lmbs[i].drc_index == drc_index) {
  430. lmb_found = 1;
  431. rc = dlpar_remove_lmb(&lmbs[i]);
  432. break;
  433. }
  434. }
  435. if (!lmb_found)
  436. rc = -EINVAL;
  437. if (rc)
  438. pr_info("Failed to hot-remove memory at %llx\n",
  439. lmbs[i].base_addr);
  440. else
  441. pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);
  442. return rc;
  443. }
#else
/*
 * Stubs used when CONFIG_MEMORY_HOTREMOVE is disabled: all remove
 * operations report -EOPNOTSUPP, except the OF-notifier node-detach
 * path which trivially succeeds.
 */
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
/* NOTE(review): no non-stub dlpar_memory_remove() is visible in this
 * file — confirm whether this stub is still referenced anywhere.
 */
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * Hot-add a single LMB: acquire its DRC, record it in the device tree,
 * then add the memory.  If add_memory() fails, the device tree update
 * and the DRC acquisition are both rolled back.
 *
 * Returns 0 on success or a negative errno.
 */
static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	/* Already-assigned LMBs cannot be added again. */
	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = dlpar_acquire_drc(lmb->drc_index);
	if (rc)
		return rc;

	rc = dlpar_add_device_tree_lmb(lmb);
	if (rc) {
		pr_err("Couldn't update device tree for drc index %x\n",
		       lmb->drc_index);
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		/* Undo both the device tree update and the DRC grab. */
		dlpar_remove_device_tree_lmb(lmb);
		dlpar_release_drc(lmb->drc_index);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}
  501. static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
  502. {
  503. struct of_drconf_cell *lmbs;
  504. u32 num_lmbs, *p;
  505. int lmbs_available = 0;
  506. int lmbs_added = 0;
  507. int i, rc;
  508. pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
  509. if (lmbs_to_add == 0)
  510. return -EINVAL;
  511. p = prop->value;
  512. num_lmbs = *p++;
  513. lmbs = (struct of_drconf_cell *)p;
  514. /* Validate that there are enough LMBs to satisfy the request */
  515. for (i = 0; i < num_lmbs; i++) {
  516. if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
  517. lmbs_available++;
  518. }
  519. if (lmbs_available < lmbs_to_add)
  520. return -EINVAL;
  521. for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
  522. rc = dlpar_add_lmb(&lmbs[i]);
  523. if (rc)
  524. continue;
  525. lmbs_added++;
  526. /* Mark this lmb so we can remove it later if all of the
  527. * requested LMBs cannot be added.
  528. */
  529. lmbs[i].reserved = 1;
  530. }
  531. if (lmbs_added != lmbs_to_add) {
  532. pr_err("Memory hot-add failed, removing any added LMBs\n");
  533. for (i = 0; i < num_lmbs; i++) {
  534. if (!lmbs[i].reserved)
  535. continue;
  536. rc = dlpar_remove_lmb(&lmbs[i]);
  537. if (rc)
  538. pr_err("Failed to remove LMB, drc index %x\n",
  539. be32_to_cpu(lmbs[i].drc_index));
  540. }
  541. rc = -EINVAL;
  542. } else {
  543. for (i = 0; i < num_lmbs; i++) {
  544. if (!lmbs[i].reserved)
  545. continue;
  546. pr_info("Memory at %llx (drc index %x) was hot-added\n",
  547. lmbs[i].base_addr, lmbs[i].drc_index);
  548. lmbs[i].reserved = 0;
  549. }
  550. }
  551. return rc;
  552. }
  553. static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
  554. {
  555. struct of_drconf_cell *lmbs;
  556. u32 num_lmbs, *p;
  557. int i, lmb_found;
  558. int rc;
  559. pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
  560. p = prop->value;
  561. num_lmbs = *p++;
  562. lmbs = (struct of_drconf_cell *)p;
  563. lmb_found = 0;
  564. for (i = 0; i < num_lmbs; i++) {
  565. if (lmbs[i].drc_index == drc_index) {
  566. lmb_found = 1;
  567. rc = dlpar_add_lmb(&lmbs[i]);
  568. break;
  569. }
  570. }
  571. if (!lmb_found)
  572. rc = -EINVAL;
  573. if (rc)
  574. pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
  575. else
  576. pr_info("Memory at %llx (drc index %x) was hot-added\n",
  577. lmbs[i].base_addr, drc_index);
  578. return rc;
  579. }
/*
 * Top-level entry point for memory DLPAR requests from a hot-plug
 * error log.
 *
 * Clones "ibm,dynamic-memory" into CPU endianness, then dispatches to
 * the add/remove by-count or by-index handler selected by @hp_elog.
 * Runs entirely under the device hotplug lock.
 *
 * Returns 0 on success or a negative errno.
 */
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	struct device_node *dn;
	struct property *prop;
	u32 count, drc_index;
	int rc;

	/* NOTE(review): _drc_u fields are read without endian conversion
	 * here — confirm the caller has already converted them.
	 */
	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_memory_add_by_count(count, prop);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_memory_add_by_index(drc_index, prop);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_memory_remove_by_count(count, prop);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_memory_remove_by_index(drc_index, prop);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	dlpar_free_property(prop);

dlpar_memory_out:
	of_node_put(dn);	/* dn may be NULL here; of_node_put() tolerates NULL */
	unlock_device_hotplug();
	return rc;
}
/*
 * OF reconfig helper: when the attached node @np is a memory node,
 * parse its "reg" property and register the region with memblock.
 *
 * Returns 0 if the node is not memory or the add succeeded, -EINVAL on
 * a missing "reg" property or a memblock_add() failure.
 */
static int pseries_add_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	/*
	 * 64-bit base address followed by the size; the size's low 32
	 * bits are read from regs[3].  NOTE(review): assumes 2 address
	 * cells + 2 size cells — confirm against #address-cells.
	 */
	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}
/*
 * OF reconfig handler for updates of "ibm,dynamic-memory" that did NOT
 * originate from our own RTAS hot-plug path (those set rtas_hp_event
 * and are skipped here).
 *
 * Compares old and new drconf entries (both still big-endian at this
 * point) and mirrors the first assigned/unassigned transition found
 * into memblock.  Returns 0 when ignoring our own event, otherwise the
 * result of the memblock update or -EINVAL.
 */
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	/* Triggered by our own dlpar_update_drconf_property() — ignore. */
	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			/* Entry went assigned -> unassigned: remove it. */
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
				memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			/* Entry became assigned: add it to memblock. */
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}

	return rc;
}
  698. static int pseries_memory_notifier(struct notifier_block *nb,
  699. unsigned long action, void *data)
  700. {
  701. struct of_reconfig_data *rd = data;
  702. int err = 0;
  703. switch (action) {
  704. case OF_RECONFIG_ATTACH_NODE:
  705. err = pseries_add_mem_node(rd->dn);
  706. break;
  707. case OF_RECONFIG_DETACH_NODE:
  708. err = pseries_remove_mem_node(rd->dn);
  709. break;
  710. case OF_RECONFIG_UPDATE_PROPERTY:
  711. if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
  712. err = pseries_update_drconf_memory(rd);
  713. break;
  714. }
  715. return notifier_from_errno(err);
  716. }
static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

/* Register the OF reconfig notifier; memory DLPAR applies to LPARs only. */
static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);