/*
 * PowerNV OPAL high level interfaces
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>

#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>

#include "powernv.h"

/* /sys/firmware/opal */
struct kobject *opal_kobj;

struct opal {
	u64 base;
	u64 entry;
	u64 size;
} opal;

struct mcheck_recoverable_range {
	u64 start_addr;
	u64 end_addr;
	u64 recover_addr;
};

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
extern u64 opal_mc_secondary_handler[];
static unsigned int *opal_irqs;
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);

static void opal_reinit_cores(void)
{
	/* Do the actual re-init. This will clobber all FPRs, VRs, etc...
	 *
	 * It will preserve non volatile GPRs and HSPRG0/1. It will
	 * also restore HIDs and other SPRs to their original value
	 * but it might clobber a bunch.
	 */
#ifdef __BIG_ENDIAN__
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
#else
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
#endif
}
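
/*
 * Parse the flat device tree for the "ibm,opal" node: record the OPAL
 * base/entry/size values, set the firmware feature bits matching the
 * advertised OPAL ABI version, and re-init all cores for the kernel's
 * endianness.
 */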
int __init early_init_dt_scan_opal(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const void *basep, *entryp, *sizep;
	int basesz, entrysz, runtimesz;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	basep  = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
	entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
	sizep  = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);

	if (!basep || !entryp || !sizep)
		return 1;

	opal.base  = of_read_number(basep, basesz/4);
	opal.entry = of_read_number(entryp, entrysz/4);
	opal.size  = of_read_number(sizep, runtimesz/4);

	pr_debug("OPAL Base  = 0x%llx (basep=%p basesz=%d)\n",
		 opal.base, basep, basesz);
	pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%d)\n",
		 opal.entry, entryp, entrysz);
	pr_debug("OPAL Size  = 0x%llx (sizep=%p runtimesz=%d)\n",
		 opal.size, sizep, runtimesz);

	powerpc_firmware_features |= FW_FEATURE_OPAL;
	if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		powerpc_firmware_features |= FW_FEATURE_OPALv3;
		printk("OPAL V3 detected !\n");
	} else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		printk("OPAL V2 detected !\n");
	} else {
		printk("OPAL V1 detected !\n");
	}

	/* Reinit all cores with the right endian */
	opal_reinit_cores();

	/* Restore some bits */
	if (cur_cpu_spec->cpu_restore)
		cur_cpu_spec->cpu_restore();

	return 1;
}
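
/*
 * Parse the "mcheck-recoverable-ranges" property (if present) into an
 * array of mcheck_recoverable_range entries. The machine check handler
 * consults this table in real mode, so the array is allocated below
 * ppc64_rma_size.
 */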
int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				   const char *uname, int depth, void *data)
{
	int i, psize, size;
	const __be32 *prop;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);

	if (!prop)
		return 1;

	pr_debug("Found machine check recoverable ranges.\n");

	/*
	 * Calculate number of available entries.
	 *
	 * Each recoverable address range entry is (start address, len,
	 * recovery address), 2 cells each for start and recovery address,
	 * 1 cell for len, totalling 5 cells per entry.
	 */
	mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

	/* Sanity check */
	if (!mc_recoverable_range_len)
		return 1;

	/* Size required to hold all the entries. */
	size = mc_recoverable_range_len *
			sizeof(struct mcheck_recoverable_range);

	/*
	 * Allocate a buffer to hold the MC recoverable ranges. We would be
	 * accessing them in real mode, hence it needs to be within
	 * RMO region.
	 */
	mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64),
							ppc64_rma_size));
	memset(mc_recoverable_range, 0, size);

	for (i = 0; i < mc_recoverable_range_len; i++) {
		mc_recoverable_range[i].start_addr =
					of_read_number(prop + (i * 5) + 0, 2);
		mc_recoverable_range[i].end_addr =
					mc_recoverable_range[i].start_addr +
					of_read_number(prop + (i * 5) + 2, 1);
		mc_recoverable_range[i].recover_addr =
					of_read_number(prop + (i * 5) + 3, 2);

		pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
			 mc_recoverable_range[i].start_addr,
			 mc_recoverable_range[i].end_addr,
			 mc_recoverable_range[i].recover_addr);
	}
	return 1;
}
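
/*
 * Register the hypervisor maintenance and softpatch exception handlers
 * with OPAL, using the FWNMI area at 0x7000 as glue space. This is only
 * done on big endian builds.
 */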
static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
	u64 glue;

	if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
		return -ENODEV;

	/* Hook up some exception handlers except machine check. We use the
	 * fwnmi area at 0x7000 to provide the glue space to OPAL
	 */
	glue = 0x7000;
	opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
					0, glue);
	glue += 128;
	opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

	return 0;
}

early_initcall(opal_register_exception_handlers);

int opal_notifier_register(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_register(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_register);

int opal_notifier_unregister(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_unregister(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_unregister);
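
/*
 * Fire the OPAL event notifier chain, unless notification is currently
 * held off via opal_notifier_disable(). The changed bit mask is derived
 * from the previously notified event mask.
 */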
static void opal_do_notifier(uint64_t events)
{
	unsigned long flags;
	uint64_t changed_mask;

	if (atomic_read(&opal_notifier_hold))
		return;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	changed_mask = last_notified_mask ^ events;
	last_notified_mask = events;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);

	/*
	 * Pass both the event bits and the changed bits so the
	 * callback has enough information to act on.
	 */
	atomic_notifier_call_chain(&opal_notifier_head,
				   events, (void *)changed_mask);
}

void opal_notifier_update_evt(uint64_t evt_mask,
			      uint64_t evt_val)
{
	unsigned long flags;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	last_notified_mask &= ~evt_mask;
	last_notified_mask |= evt_val;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);
}

void opal_notifier_enable(void)
{
	int64_t rc;
	__be64 evt = 0;

	atomic_set(&opal_notifier_hold, 0);

	/* Process pending events */
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));
}

void opal_notifier_disable(void)
{
	atomic_set(&opal_notifier_hold, 1);
}

/*
 * Opal message notifier based on message type. Allow subscribers to get
 * notified for specific message type.
 */
int opal_message_notifier_register(enum OpalMessageType msg_type,
				   struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}
	if (msg_type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Invalid message type argument (%d)\n",
			   __func__, msg_type);
		return -EINVAL;
	}

	return atomic_notifier_chain_register(
			&opal_msg_notifier_head[msg_type], nb);
}

static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
	/* notify subscribers */
	atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
				   msg_type, msg);
}
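
/*
 * Fetch one pending message from OPAL and dispatch it to the notifier
 * chain registered for its message type.
 */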
static void opal_handle_message(void)
{
	s64 ret;
	/*
	 * TODO: pre-allocate a message buffer depending on opal-msg-size
	 * value in /proc/device-tree.
	 */
	static struct opal_msg msg;
	u32 type;

	ret = opal_get_msg(__pa(&msg), sizeof(msg));
	/* No opal message pending. */
	if (ret == OPAL_RESOURCE)
		return;

	/* check for errors. */
	if (ret) {
		pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
			   __func__, ret);
		return;
	}

	type = be32_to_cpu(msg.msg_type);

	/* Sanity check */
	if (type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)&msg);
}

static int opal_message_notify(struct notifier_block *nb,
			       unsigned long events, void *change)
{
	if (events & OPAL_EVENT_MSG_PENDING)
		opal_handle_message();
	return 0;
}

static struct notifier_block opal_message_nb = {
	.notifier_call	= opal_message_notify,
	.next		= NULL,
	.priority	= 0,
};

static int __init opal_message_init(void)
{
	int ret, i;

	for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);

	ret = opal_notifier_register(&opal_message_nb);
	if (ret) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
		       __func__, ret);
		return ret;
	}
	return 0;
}
early_initcall(opal_message_init);
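
/*
 * Console input: returns the number of bytes read into @buf, 0 when no
 * console input is pending, or -ENODEV when OPAL is not available.
 */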
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = cpu_to_be64(count);
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}
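
/*
 * Console output: writes up to @total_len bytes and returns the number
 * of bytes accepted by OPAL, or -EAGAIN when the firmware buffer cannot
 * take the whole write atomically.
 */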
int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on opal v1, so we just assume there is
	 * enough room and are done with it.
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while (total_len > 0 && (rc == OPAL_BUSY ||
				 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);

		/* Closed or other error drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need that for the console to
		 * flush when there aren't any interrupts. We will clean
		 * things up a bit later to limit that to synchronous paths
		 * such as the kernel console and xmon/udbg.
		 */
		do
			opal_poll_events(&evt);
		while (rc == OPAL_SUCCESS &&
		       (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}
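
/*
 * Decide whether a machine check is recoverable: either the platform
 * already corrected it, or the fault hit a user address or was a
 * synchronous user-mode error, in which case the offending task is
 * killed with SIGBUS. Returns 1 if recovered, 0 otherwise.
 */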
static int opal_recover_mce(struct pt_regs *regs,
			    struct machine_check_event *evt)
{
	int recovered = 0;
	uint64_t ea = get_mce_fault_addr(evt);

	if (!(regs->msr & MSR_RI)) {
		/* If MSR_RI isn't set, we cannot recover */
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;
	} else if (ea && !is_kernel_addr(ea)) {
		/*
		 * Faulting address is not in kernel text. We should be fine.
		 * We need to find which process uses this address.
		 * For now, kill the task if we have received exception when
		 * in userspace.
		 *
		 * TODO: Queue up this address for hwpoisoning later.
		 */
		if (user_mode(regs) && !is_global_init(current)) {
			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
			recovered = 1;
		} else
			recovered = 0;
	} else if (user_mode(regs) && !is_global_init(current) &&
		   evt->severity == MCE_SEV_ERROR_SYNC) {
		/*
		 * If we have received a synchronous error when in userspace,
		 * kill the task.
		 */
		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
		recovered = 1;
	}
	return recovered;
}

int opal_machine_check(struct pt_regs *regs)
{
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return 0;

	/* Print things out */
	if (evt.version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt.version);
		return 0;
	}
	machine_check_print_event_info(&evt);

	if (opal_recover_mce(regs, &evt))
		return 1;
	return 0;
}
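
/*
 * Look up @nip in the mcheck-recoverable-ranges table and return the
 * matching recovery (fixup) address, or 0 when there is none.
 */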
static uint64_t find_recovery_address(uint64_t nip)
{
	int i;

	for (i = 0; i < mc_recoverable_range_len; i++)
		if ((nip >= mc_recoverable_range[i].start_addr) &&
		    (nip < mc_recoverable_range[i].end_addr))
			return mc_recoverable_range[i].recover_addr;
	return 0;
}
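
/*
 * If a machine check interrupted OPAL firmware code, redirect the rfi
 * to the registered recovery address so firmware can fix itself up.
 * Returns true when regs->nip was patched.
 */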
bool opal_mce_check_early_recovery(struct pt_regs *regs)
{
	uint64_t recover_addr = 0;

	if (!opal.base || !opal.size)
		goto out;

	if ((regs->nip >= opal.base) &&
	    (regs->nip <= (opal.base + opal.size)))
		recover_addr = find_recovery_address(regs->nip);

	/*
	 * Setup regs->nip to rfi into fixup address.
	 */
	if (recover_addr)
		regs->nip = recover_addr;

out:
	return !!recover_addr;
}

static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

	opal_handle_interrupt(virq_to_hw(irq), &events);

	opal_do_notifier(be64_to_cpu(events));

	return IRQ_HANDLED;
}

static int opal_sysfs_init(void)
{
	opal_kobj = kobject_create_and_add("opal", firmware_kobj);
	if (!opal_kobj) {
		pr_warn("kobject_create_and_add opal failed\n");
		return -ENOMEM;
	}

	return 0;
}
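
/*
 * Platform bring-up: create platform devices for the OPAL console
 * ports, map and request the interrupts listed in "opal-interrupts",
 * then register the sysfs, error log, flash, dump, sysparam and msglog
 * interfaces.
 */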
static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	const __be32 *irqs;
	int rc, i, irqlen;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("opal: Node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Find all OPAL interrupts and request them */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	pr_debug("opal: Found %d interrupts reserved for OPAL\n",
		 irqs ? (irqlen / 4) : 0);
	opal_irq_count = irqs ? (irqlen / 4) : 0;
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
		unsigned int hwirq = be32_to_cpup(irqs);
		unsigned int irq = irq_create_mapping(NULL, hwirq);
		if (irq == NO_IRQ) {
			pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
			continue;
		}
		rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
		if (rc)
			pr_warning("opal: Error %d requesting irq %d"
				   " (0x%x)\n", rc, irq, hwirq);
		opal_irqs[i] = irq;
	}

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	return 0;
}
subsys_initcall(opal_init);
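
/*
 * Shutdown path: free (and thereby mask) all OPAL interrupts, then spin
 * on opal_sync_host_reboot() until firmware confirms that nothing can
 * still write to our memory.
 */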
void opal_shutdown(void)
{
	unsigned int i;
	long rc = OPAL_BUSY;

	/* First free interrupts, which will also mask them */
	for (i = 0; i < opal_irq_count; i++) {
		if (opal_irqs[i])
			free_irq(opal_irqs[i], NULL);
		opal_irqs[i] = 0;
	}

	/*
	 * Then sync with OPAL, which ensures anything that can
	 * potentially write to our memory (such as an ongoing
	 * dump retrieval) has completed.
	 */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_sync_host_reboot();
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}
}

/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);

/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}
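
/*
 * Walk an opal_sg_list chain and free every node. The next pointers are
 * stored as big-endian physical addresses, so convert them back before
 * following.
 */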
void opal_free_sg_list(struct opal_sg_list *sg)
{
	while (sg) {
		uint64_t next = be64_to_cpu(sg->next);

		kfree(sg);

		if (next)
			sg = __va(next);
		else
			sg = NULL;
	}
}