hotplug-memory.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080
  1. /*
  2. * pseries Memory Hotplug infrastructure.
  3. *
  4. * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
  12. #include <linux/of.h>
  13. #include <linux/of_address.h>
  14. #include <linux/memblock.h>
  15. #include <linux/memory.h>
  16. #include <linux/memory_hotplug.h>
  17. #include <linux/slab.h>
  18. #include <asm/firmware.h>
  19. #include <asm/machdep.h>
  20. #include <asm/prom.h>
  21. #include <asm/sparsemem.h>
  22. #include <asm/fadump.h>
  23. #include <asm/drmem.h>
  24. #include "pseries.h"
  25. static bool rtas_hp_event;
  26. unsigned long pseries_memory_block_size(void)
  27. {
  28. struct device_node *np;
  29. unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
  30. struct resource r;
  31. np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
  32. if (np) {
  33. const __be64 *size;
  34. size = of_get_property(np, "ibm,lmb-size", NULL);
  35. if (size)
  36. memblock_size = be64_to_cpup(size);
  37. of_node_put(np);
  38. } else if (machine_is(pseries)) {
  39. /* This fallback really only applies to pseries */
  40. unsigned int memzero_size = 0;
  41. np = of_find_node_by_path("/memory@0");
  42. if (np) {
  43. if (!of_address_to_resource(np, 0, &r))
  44. memzero_size = resource_size(&r);
  45. of_node_put(np);
  46. }
  47. if (memzero_size) {
  48. /* We now know the size of memory@0, use this to find
  49. * the first memoryblock and get its size.
  50. */
  51. char buf[64];
  52. sprintf(buf, "/memory@%x", memzero_size);
  53. np = of_find_node_by_path(buf);
  54. if (np) {
  55. if (!of_address_to_resource(np, 0, &r))
  56. memblock_size = resource_size(&r);
  57. of_node_put(np);
  58. }
  59. }
  60. }
  61. return memblock_size;
  62. }
  63. static void dlpar_free_property(struct property *prop)
  64. {
  65. kfree(prop->name);
  66. kfree(prop->value);
  67. kfree(prop);
  68. }
  69. static struct property *dlpar_clone_property(struct property *prop,
  70. u32 prop_size)
  71. {
  72. struct property *new_prop;
  73. new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
  74. if (!new_prop)
  75. return NULL;
  76. new_prop->name = kstrdup(prop->name, GFP_KERNEL);
  77. new_prop->value = kzalloc(prop_size, GFP_KERNEL);
  78. if (!new_prop->name || !new_prop->value) {
  79. dlpar_free_property(new_prop);
  80. return NULL;
  81. }
  82. memcpy(new_prop->value, prop->value, prop->length);
  83. new_prop->length = prop_size;
  84. of_property_set_flag(new_prop, OF_DYNAMIC);
  85. return new_prop;
  86. }
/*
 * Find the index of @lmb_assoc's associativity within the
 * ibm,associativity-lookup-arrays property @ala_prop of @dr_node.
 *
 * If no existing array matches, the property is grown by one array
 * holding the new associativity and written back to the device tree.
 * Returns the lookup-array index, or (u32)-1 if the property clone
 * allocation fails.
 */
static u32 find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop, const u32 *lmb_assoc)
{
	u32 *assoc_arrays;
	u32 aa_index;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	aa_index = -1;

	/*
	 * Compare each existing array (offset 2 skips the two header
	 * cells) against the LMB's associativity; &lmb_assoc[1] skips
	 * the leading length cell of the ibm,associativity property.
	 */
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		aa_index = i;
		break;
	}

	if (aa_index == -1) {
		/* No match: append a new array to the property. */
		struct property *new_prop;
		u32 new_prop_size;

		new_prop_size = ala_prop->length + aa_array_sz;
		new_prop = dlpar_clone_property(ala_prop, new_prop_size);
		if (!new_prop)
			return -1;

		assoc_arrays = new_prop->value;

		/* increment the number of entries in the lookup array */
		assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

		/* copy the new associativity into the lookup array */
		index = aa_arrays * aa_array_entries + 2;
		memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

		of_update_property(dr_node, new_prop);

		/*
		 * The associativity lookup array index for this lmb is
		 * number of entries - 1 since we added its associativity
		 * to the end of the lookup array.
		 */
		aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	}

	return aa_index;
}
  135. static u32 lookup_lmb_associativity_index(struct drmem_lmb *lmb)
  136. {
  137. struct device_node *parent, *lmb_node, *dr_node;
  138. struct property *ala_prop;
  139. const u32 *lmb_assoc;
  140. u32 aa_index;
  141. parent = of_find_node_by_path("/");
  142. if (!parent)
  143. return -ENODEV;
  144. lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
  145. parent);
  146. of_node_put(parent);
  147. if (!lmb_node)
  148. return -EINVAL;
  149. lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
  150. if (!lmb_assoc) {
  151. dlpar_free_cc_nodes(lmb_node);
  152. return -ENODEV;
  153. }
  154. dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
  155. if (!dr_node) {
  156. dlpar_free_cc_nodes(lmb_node);
  157. return -ENODEV;
  158. }
  159. ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
  160. NULL);
  161. if (!ala_prop) {
  162. of_node_put(dr_node);
  163. dlpar_free_cc_nodes(lmb_node);
  164. return -ENODEV;
  165. }
  166. aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);
  167. dlpar_free_cc_nodes(lmb_node);
  168. return aa_index;
  169. }
  170. static int dlpar_add_device_tree_lmb(struct drmem_lmb *lmb)
  171. {
  172. int rc, aa_index;
  173. lmb->flags |= DRCONF_MEM_ASSIGNED;
  174. aa_index = lookup_lmb_associativity_index(lmb);
  175. if (aa_index < 0) {
  176. pr_err("Couldn't find associativity index for drc index %x\n",
  177. lmb->drc_index);
  178. return aa_index;
  179. }
  180. lmb->aa_index = aa_index;
  181. rtas_hp_event = true;
  182. rc = drmem_update_dt();
  183. rtas_hp_event = false;
  184. return rc;
  185. }
  186. static int dlpar_remove_device_tree_lmb(struct drmem_lmb *lmb)
  187. {
  188. int rc;
  189. lmb->flags &= ~DRCONF_MEM_ASSIGNED;
  190. lmb->aa_index = 0xffffffff;
  191. rtas_hp_event = true;
  192. rc = drmem_update_dt();
  193. rtas_hp_event = false;
  194. return rc;
  195. }
  196. static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
  197. {
  198. unsigned long section_nr;
  199. struct mem_section *mem_sect;
  200. struct memory_block *mem_block;
  201. section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
  202. mem_sect = __nr_to_section(section_nr);
  203. mem_block = find_memory_block(mem_sect);
  204. return mem_block;
  205. }
  206. static int get_lmb_range(u32 drc_index, int n_lmbs,
  207. struct drmem_lmb **start_lmb,
  208. struct drmem_lmb **end_lmb)
  209. {
  210. struct drmem_lmb *lmb, *start, *end;
  211. struct drmem_lmb *last_lmb;
  212. start = NULL;
  213. for_each_drmem_lmb(lmb) {
  214. if (lmb->drc_index == drc_index) {
  215. start = lmb;
  216. break;
  217. }
  218. }
  219. if (!start)
  220. return -EINVAL;
  221. end = &start[n_lmbs - 1];
  222. last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
  223. if (end > last_lmb)
  224. return -EINVAL;
  225. *start_lmb = start;
  226. *end_lmb = end;
  227. return 0;
  228. }
  229. static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
  230. {
  231. struct memory_block *mem_block;
  232. int rc;
  233. mem_block = lmb_to_memblock(lmb);
  234. if (!mem_block)
  235. return -EINVAL;
  236. if (online && mem_block->dev.offline)
  237. rc = device_online(&mem_block->dev);
  238. else if (!online && !mem_block->dev.offline)
  239. rc = device_offline(&mem_block->dev);
  240. else
  241. rc = 0;
  242. put_device(&mem_block->dev);
  243. return rc;
  244. }
/* Online the memory block backing @lmb. */
static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}
  249. #ifdef CONFIG_MEMORY_HOTREMOVE
/* Offline the memory block backing @lmb. */
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}
  254. static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
  255. {
  256. unsigned long block_sz, start_pfn;
  257. int sections_per_block;
  258. int i, nid;
  259. start_pfn = base >> PAGE_SHIFT;
  260. lock_device_hotplug();
  261. if (!pfn_valid(start_pfn))
  262. goto out;
  263. block_sz = pseries_memory_block_size();
  264. sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
  265. nid = memory_add_physaddr_to_nid(base);
  266. for (i = 0; i < sections_per_block; i++) {
  267. remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
  268. base += MIN_MEMORY_BLOCK_SIZE;
  269. }
  270. out:
  271. /* Update memory regions for memory remove */
  272. memblock_remove(base, memblock_size);
  273. unlock_device_hotplug();
  274. return 0;
  275. }
  276. static int pseries_remove_mem_node(struct device_node *np)
  277. {
  278. const char *type;
  279. const __be32 *regs;
  280. unsigned long base;
  281. unsigned int lmb_size;
  282. int ret = -EINVAL;
  283. /*
  284. * Check to see if we are actually removing memory
  285. */
  286. type = of_get_property(np, "device_type", NULL);
  287. if (type == NULL || strcmp(type, "memory") != 0)
  288. return 0;
  289. /*
  290. * Find the base address and size of the memblock
  291. */
  292. regs = of_get_property(np, "reg", NULL);
  293. if (!regs)
  294. return ret;
  295. base = be64_to_cpu(*(unsigned long *)regs);
  296. lmb_size = be32_to_cpu(regs[3]);
  297. pseries_remove_memblock(base, lmb_size);
  298. return 0;
  299. }
  300. static bool lmb_is_removable(struct drmem_lmb *lmb)
  301. {
  302. int i, scns_per_block;
  303. int rc = 1;
  304. unsigned long pfn, block_sz;
  305. u64 phys_addr;
  306. if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
  307. return false;
  308. block_sz = memory_block_size_bytes();
  309. scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
  310. phys_addr = lmb->base_addr;
  311. #ifdef CONFIG_FA_DUMP
  312. /* Don't hot-remove memory that falls in fadump boot memory area */
  313. if (is_fadump_boot_memory_area(phys_addr, block_sz))
  314. return false;
  315. #endif
  316. for (i = 0; i < scns_per_block; i++) {
  317. pfn = PFN_DOWN(phys_addr);
  318. if (!pfn_present(pfn))
  319. continue;
  320. rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
  321. phys_addr += MIN_MEMORY_BLOCK_SIZE;
  322. }
  323. return rc ? true : false;
  324. }
  325. static int dlpar_add_lmb(struct drmem_lmb *);
/*
 * Offline and remove the memory backing @lmb, then clear its assigned
 * state in the device tree.  Returns 0 on success or a negative error;
 * the LMB is left untouched if it is not removable or cannot be
 * offlined.
 */
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	/* Offline first; this fails if the pages cannot be migrated away. */
	rc = dlpar_offline_lmb(lmb);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_remove_device_tree_lmb(lmb);

	return 0;
}
/*
 * Hot-remove @lmbs_to_remove LMBs, chosen from whatever LMBs are
 * currently removable.
 *
 * First verifies that enough removable LMBs exist, then removes them
 * one by one, reserving each success.  If the full count cannot be
 * removed, every LMB removed so far is added back and -EINVAL is
 * returned; otherwise the DRCs of the removed LMBs are released to
 * firmware.
 */
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_removed++;
		if (lmbs_removed == lmbs_to_remove)
			break;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		/* Roll back: re-add every LMB we managed to remove. */
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}

		rc = -EINVAL;
	} else {
		/* Commit: hand each removed LMB's DRC back to firmware. */
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}
  401. static int dlpar_memory_remove_by_index(u32 drc_index)
  402. {
  403. struct drmem_lmb *lmb;
  404. int lmb_found;
  405. int rc;
  406. pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
  407. lmb_found = 0;
  408. for_each_drmem_lmb(lmb) {
  409. if (lmb->drc_index == drc_index) {
  410. lmb_found = 1;
  411. rc = dlpar_remove_lmb(lmb);
  412. if (!rc)
  413. dlpar_release_drc(lmb->drc_index);
  414. break;
  415. }
  416. }
  417. if (!lmb_found)
  418. rc = -EINVAL;
  419. if (rc)
  420. pr_info("Failed to hot-remove memory at %llx\n",
  421. lmb->base_addr);
  422. else
  423. pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);
  424. return rc;
  425. }
  426. static int dlpar_memory_readd_by_index(u32 drc_index)
  427. {
  428. struct drmem_lmb *lmb;
  429. int lmb_found;
  430. int rc;
  431. pr_info("Attempting to update LMB, drc index %x\n", drc_index);
  432. lmb_found = 0;
  433. for_each_drmem_lmb(lmb) {
  434. if (lmb->drc_index == drc_index) {
  435. lmb_found = 1;
  436. rc = dlpar_remove_lmb(lmb);
  437. if (!rc) {
  438. rc = dlpar_add_lmb(lmb);
  439. if (rc)
  440. dlpar_release_drc(lmb->drc_index);
  441. }
  442. break;
  443. }
  444. }
  445. if (!lmb_found)
  446. rc = -EINVAL;
  447. if (rc)
  448. pr_info("Failed to update memory at %llx\n",
  449. lmb->base_addr);
  450. else
  451. pr_info("Memory at %llx was updated\n", lmb->base_addr);
  452. return rc;
  453. }
/*
 * Hot-remove @lmbs_to_remove contiguous LMBs starting at DRC index
 * @drc_index.  Only assigned LMBs in the range are removed; on any
 * failure, everything removed so far is added back and -EINVAL is
 * returned.
 */
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		/* Roll back the partially completed removal. */
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		/* Commit: release the DRC of every removed LMB. */
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}
  506. #else
/*
 * CONFIG_MEMORY_HOTREMOVE is not enabled: memory removal is not
 * supported, so the removal entry points report -EOPNOTSUPP (or
 * succeed as no-ops where callers expect success).
 */
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	/* Nothing to undo when removal is not supported. */
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_readd_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
  540. #endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * Hot-add the memory backing @lmb: update the device tree, add the
 * memory to the kernel, and online it.  Each step is unwound when a
 * later step fails.  Returns 0 on success or a negative error.
 *
 * NOTE(review): on device tree update failure this function releases
 * the DRC itself, yet some callers also release the DRC when this
 * returns an error — verify against the DRC ownership rules.
 */
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = dlpar_add_device_tree_lmb(lmb);
	if (rc) {
		pr_err("Couldn't update device tree for drc index %x\n",
		       lmb->drc_index);
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		dlpar_remove_device_tree_lmb(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		/* Unwind: take the memory back out and clear the DT state. */
		remove_memory(nid, lmb->base_addr, block_sz);
		dlpar_remove_device_tree_lmb(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}
/*
 * Hot-add @lmbs_to_add LMBs, chosen from any currently unassigned
 * LMBs.
 *
 * Verifies that enough unassigned LMBs exist, then acquires each DRC
 * and adds the LMB, reserving each success.  If the full count cannot
 * be added, every LMB added so far is removed again and -EINVAL is
 * returned.
 */
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_added++;
		if (lmbs_added == lmbs_to_add)
			break;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		/* Roll back: remove every LMB added so far. */
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}
/*
 * Hot-add the single LMB with DRC index @drc_index.  Returns 0 on
 * success, -EINVAL if the index is unknown, or the acquire/add error.
 */
static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	/* The success message only runs when lmb_found, so @lmb is valid. */
	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}
/*
 * Hot-add @lmbs_to_add contiguous LMBs starting at DRC index
 * @drc_index.  Already-assigned LMBs in the range are skipped; on any
 * failure, all LMBs added so far are removed again and -EINVAL is
 * returned.
 */
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		/* Roll back the partially completed add. */
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}
/*
 * Top-level handler for a memory DLPAR request from the hotplug error
 * log.  Dispatches on the action (add/remove/readd) and id type
 * (count, DRC index, or indexed-count) while holding the device
 * hotplug lock.  Returns 0 on success or a negative error.
 */
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		drc_index = hp_elog->_drc_u.drc_index;
		rc = dlpar_memory_readd_by_index(drc_index);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}
  768. static int pseries_add_mem_node(struct device_node *np)
  769. {
  770. const char *type;
  771. const __be32 *regs;
  772. unsigned long base;
  773. unsigned int lmb_size;
  774. int ret = -EINVAL;
  775. /*
  776. * Check to see if we are actually adding memory
  777. */
  778. type = of_get_property(np, "device_type", NULL);
  779. if (type == NULL || strcmp(type, "memory") != 0)
  780. return 0;
  781. /*
  782. * Find the base and size of the memblock
  783. */
  784. regs = of_get_property(np, "reg", NULL);
  785. if (!regs)
  786. return ret;
  787. base = be64_to_cpu(*(unsigned long *)regs);
  788. lmb_size = be32_to_cpu(regs[3]);
  789. /*
  790. * Update memory region to represent the memory add
  791. */
  792. ret = memblock_add(base, lmb_size);
  793. return (ret < 0) ? -EINVAL : 0;
  794. }
/*
 * Notifier helper for an ibm,dynamic-memory property update arriving
 * via OF reconfiguration.  Compares old and new v1 drconf cells and
 * mirrors the first assignment change into memblock.  Updates driven
 * by our own RTAS hotplug path (rtas_hp_event set) are ignored since
 * their memblock bookkeeping was already done.
 */
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell_v1 *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell_v1 *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell_v1 *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			/* LMB became unassigned: drop its range. */
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
				memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			/* LMB became assigned: add its range. */
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}

	return rc;
}
  839. static int pseries_memory_notifier(struct notifier_block *nb,
  840. unsigned long action, void *data)
  841. {
  842. struct of_reconfig_data *rd = data;
  843. int err = 0;
  844. switch (action) {
  845. case OF_RECONFIG_ATTACH_NODE:
  846. err = pseries_add_mem_node(rd->dn);
  847. break;
  848. case OF_RECONFIG_DETACH_NODE:
  849. err = pseries_remove_mem_node(rd->dn);
  850. break;
  851. case OF_RECONFIG_UPDATE_PROPERTY:
  852. if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
  853. err = pseries_update_drconf_memory(rd);
  854. break;
  855. }
  856. return notifier_from_errno(err);
  857. }
/* Receives OF reconfiguration events for memory node/property changes. */
static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};
/* Register for device tree reconfiguration notifications on LPARs. */
static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
  867. machine_device_initcall(pseries, pseries_memory_hotplug_init);