opal.c

/*
 * PowerNV OPAL high level interfaces
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>

#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>

#include "powernv.h"

/* /sys/firmware/opal */
struct kobject *opal_kobj;

struct opal {
	u64 base;
	u64 entry;
	u64 size;
} opal;

struct mcheck_recoverable_range {
	u64 start_addr;
	u64 end_addr;
	u64 recover_addr;
};

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
extern u64 opal_mc_secondary_handler[];
static unsigned int *opal_irqs;
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);

static void opal_reinit_cores(void)
{
	/* Do the actual re-init. This will clobber all FPRs, VRs, etc...
	 *
	 * It will preserve non volatile GPRs and HSPRG0/1. It will
	 * also restore HIDs and other SPRs to their original value
	 * but it might clobber a bunch.
	 */
#ifdef __BIG_ENDIAN__
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
#else
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
#endif
}

int __init early_init_dt_scan_opal(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const void *basep, *entryp, *sizep;
	int basesz, entrysz, runtimesz;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	basep  = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
	entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
	sizep  = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);

	if (!basep || !entryp || !sizep)
		return 1;

	opal.base  = of_read_number(basep, basesz/4);
	opal.entry = of_read_number(entryp, entrysz/4);
	opal.size  = of_read_number(sizep, runtimesz/4);

	pr_debug("OPAL Base  = 0x%llx (basep=%p basesz=%d)\n",
		 opal.base, basep, basesz);
	pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%d)\n",
		 opal.entry, entryp, entrysz);
	pr_debug("OPAL Size  = 0x%llx (sizep=%p runtimesz=%d)\n",
		 opal.size, sizep, runtimesz);

	powerpc_firmware_features |= FW_FEATURE_OPAL;
	if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		powerpc_firmware_features |= FW_FEATURE_OPALv3;
		pr_info("OPAL V3 detected !\n");
	} else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		pr_info("OPAL V2 detected !\n");
	} else {
		pr_info("OPAL V1 detected !\n");
	}

	/* Reinit all cores with the right endian */
	opal_reinit_cores();

	/* Restore some bits */
	if (cur_cpu_spec->cpu_restore)
		cur_cpu_spec->cpu_restore();

	return 1;
}
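
/*
 * For illustration only: the flattened device tree node consumed above
 * might look roughly like this. The property and compatible names match
 * the lookups in early_init_dt_scan_opal(); the cell values are
 * hypothetical, not taken from real firmware:
 *
 *	ibm,opal {
 *		compatible = "ibm,opal-v3", "ibm,opal-v2";
 *		opal-base-address = <0x0 0x30000000>;
 *		opal-entry-address = <0x0 0x3000a000>;
 *		opal-runtime-size = <0x0 0x10000000>;
 *	};
 */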

int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				   const char *uname, int depth, void *data)
{
	int i, psize, size;
	const __be32 *prop;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);

	if (!prop)
		return 1;

	pr_debug("Found machine check recoverable ranges.\n");

	/*
	 * Calculate number of available entries.
	 *
	 * Each recoverable address range entry is (start address, len,
	 * recovery address), 2 cells each for start and recovery address,
	 * 1 cell for len, totalling 5 cells per entry.
	 */
	mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

	/* Sanity check */
	if (!mc_recoverable_range_len)
		return 1;

	/* Size required to hold all the entries. */
	size = mc_recoverable_range_len *
			sizeof(struct mcheck_recoverable_range);

	/*
	 * Allocate a buffer to hold the MC recoverable ranges. We would be
	 * accessing them in real mode, hence it needs to be within
	 * RMO region.
	 */
	mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64),
							ppc64_rma_size));
	memset(mc_recoverable_range, 0, size);

	for (i = 0; i < mc_recoverable_range_len; i++) {
		mc_recoverable_range[i].start_addr =
					of_read_number(prop + (i * 5) + 0, 2);
		mc_recoverable_range[i].end_addr =
					mc_recoverable_range[i].start_addr +
					of_read_number(prop + (i * 5) + 2, 1);
		mc_recoverable_range[i].recover_addr =
					of_read_number(prop + (i * 5) + 3, 2);

		pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
				mc_recoverable_range[i].start_addr,
				mc_recoverable_range[i].end_addr,
				mc_recoverable_range[i].recover_addr);
	}
	return 1;
}
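
/*
 * For illustration only: one "mcheck-recoverable-ranges" entry is five
 * cells, matching the of_read_number() offsets used above (2 cells of
 * start address, 1 cell of length, 2 cells of recovery address). The
 * values below are hypothetical:
 *
 *	< start_hi start_lo  len  recover_hi recover_lo >
 *	e.g. <0x0 0x30000000  0x100  0x0 0x30001000>
 */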

static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
	u64 glue;

	if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
		return -ENODEV;

	/* Hookup some exception handlers except machine check. We use the
	 * fwnmi area at 0x7000 to provide the glue space to OPAL
	 */
	glue = 0x7000;
	opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

	return 0;
}
machine_early_initcall(powernv, opal_register_exception_handlers);

int opal_notifier_register(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_register(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_register);

int opal_notifier_unregister(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_unregister(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_unregister);
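
/*
 * Usage sketch (illustrative, not part of this file): a client watching
 * OPAL events registers an atomic notifier callback. "events" carries the
 * current OPAL_EVENT_* bits and "change" the bits that flipped since the
 * last notification (see opal_do_notifier() below):
 *
 *	static int my_opal_event_cb(struct notifier_block *nb,
 *				    unsigned long events, void *change)
 *	{
 *		if (events & OPAL_EVENT_MSG_PENDING)
 *			;	// handle a pending message
 *		return 0;
 *	}
 *	static struct notifier_block my_opal_nb = {
 *		.notifier_call = my_opal_event_cb,
 *	};
 *	opal_notifier_register(&my_opal_nb);
 */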

static void opal_do_notifier(uint64_t events)
{
	unsigned long flags;
	uint64_t changed_mask;

	if (atomic_read(&opal_notifier_hold))
		return;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	changed_mask = last_notified_mask ^ events;
	last_notified_mask = events;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);

	/*
	 * Feed the callback both the current event bits and the bits
	 * that changed, so it has enough information to act on.
	 */
	atomic_notifier_call_chain(&opal_notifier_head,
				   events, (void *)changed_mask);
}

void opal_notifier_update_evt(uint64_t evt_mask,
			      uint64_t evt_val)
{
	unsigned long flags;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	last_notified_mask &= ~evt_mask;
	last_notified_mask |= evt_val;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);
}

void opal_notifier_enable(void)
{
	int64_t rc;
	__be64 evt = 0;

	atomic_set(&opal_notifier_hold, 0);

	/* Process pending events */
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));
}

void opal_notifier_disable(void)
{
	atomic_set(&opal_notifier_hold, 1);
}

/*
 * Opal message notifier based on message type. Allow subscribers to get
 * notified for a specific message type.
 */
int opal_message_notifier_register(enum OpalMessageType msg_type,
					struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}
	/* msg_type indexes opal_msg_notifier_head[OPAL_MSG_TYPE_MAX] */
	if (msg_type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Invalid message type argument (%d)\n",
			   __func__, msg_type);
		return -EINVAL;
	}

	return atomic_notifier_chain_register(
				&opal_msg_notifier_head[msg_type], nb);
}
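
/*
 * Usage sketch (illustrative): a subscriber interested in one message
 * type, e.g. OPAL_MSG_EPOW from enum OpalMessageType, might do:
 *
 *	static int my_epow_cb(struct notifier_block *nb,
 *			      unsigned long msg_type, void *msg)
 *	{
 *		struct opal_msg *m = msg;
 *		// inspect the message payload (struct opal_msg)
 *		return 0;
 *	}
 *	static struct notifier_block my_epow_nb = {
 *		.notifier_call = my_epow_cb,
 *	};
 *	opal_message_notifier_register(OPAL_MSG_EPOW, &my_epow_nb);
 */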

static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
	/* notify subscribers */
	atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
					msg_type, msg);
}

static void opal_handle_message(void)
{
	s64 ret;
	/*
	 * TODO: pre-allocate a message buffer depending on opal-msg-size
	 * value in /proc/device-tree.
	 */
	static struct opal_msg msg;
	u32 type;

	ret = opal_get_msg(__pa(&msg), sizeof(msg));
	/* No opal message pending. */
	if (ret == OPAL_RESOURCE)
		return;

	/* check for errors. */
	if (ret) {
		pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
			   __func__, ret);
		return;
	}

	type = be32_to_cpu(msg.msg_type);

	/* Sanity check: type must be a valid array index */
	if (type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)&msg);
}

static int opal_message_notify(struct notifier_block *nb,
			       unsigned long events, void *change)
{
	if (events & OPAL_EVENT_MSG_PENDING)
		opal_handle_message();
	return 0;
}

static struct notifier_block opal_message_nb = {
	.notifier_call	= opal_message_notify,
	.next		= NULL,
	.priority	= 0,
};

static int __init opal_message_init(void)
{
	int ret, i;

	for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);

	ret = opal_notifier_register(&opal_message_nb);
	if (ret) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
		       __func__, ret);
		return ret;
	}
	return 0;
}
machine_early_initcall(powernv, opal_message_init);

int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = cpu_to_be64(count);
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}

int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on opal v1, so we just assume there is
	 * enough room and be done with it
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while (total_len > 0 && (rc == OPAL_BUSY ||
			rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);

		/* Closed or other error drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need that for the console to
		 * flush when there aren't any interrupts. We will clean
		 * things a bit later to limit that to synchronous path
		 * such as the kernel console and xmon/udbg
		 */
		do
			opal_poll_events(&evt);
		while (rc == OPAL_SUCCESS &&
			(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}
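
/*
 * Usage sketch (illustrative): these two routines back the OPAL hvc
 * console. A caller reading then writing on vterm 0 might look like:
 *
 *	char buf[16];
 *	int n = opal_get_chars(0, buf, sizeof(buf));
 *	if (n > 0)
 *		opal_put_chars(0, buf, n);	// echo input back
 */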

static int opal_recover_mce(struct pt_regs *regs,
					struct machine_check_event *evt)
{
	int recovered = 0;
	uint64_t ea = get_mce_fault_addr(evt);

	if (!(regs->msr & MSR_RI)) {
		/* If MSR_RI isn't set, we cannot recover */
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;
	} else if (ea && !is_kernel_addr(ea)) {
		/*
		 * Faulting address is not in kernel text. We should be fine.
		 * We need to find which process uses this address.
		 * For now, kill the task if we have received exception when
		 * in userspace.
		 *
		 * TODO: Queue up this address for hwpoisoning later.
		 */
		if (user_mode(regs) && !is_global_init(current)) {
			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
			recovered = 1;
		} else
			recovered = 0;
	} else if (user_mode(regs) && !is_global_init(current) &&
		   evt->severity == MCE_SEV_ERROR_SYNC) {
		/*
		 * If we have received a synchronous error when in userspace
		 * kill the task.
		 */
		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
		recovered = 1;
	}
	return recovered;
}

int opal_machine_check(struct pt_regs *regs)
{
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return 0;

	/* Print things out */
	if (evt.version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt.version);
		return 0;
	}
	machine_check_print_event_info(&evt);

	if (opal_recover_mce(regs, &evt))
		return 1;
	return 0;
}

/* Early HMI handler called in real mode. */
int opal_hmi_exception_early(struct pt_regs *regs)
{
	s64 rc;

	/*
	 * Call the OPAL HMI handler. Pass paca address as token.
	 * A return value of OPAL_SUCCESS indicates that an HMI event
	 * was generated and is waiting to be pulled by Linux.
	 */
	rc = opal_handle_hmi();
	if (rc == OPAL_SUCCESS) {
		local_paca->hmi_event_available = 1;
		return 1;
	}
	return 0;
}

/* HMI exception handler called in virtual mode during check_irq_replay. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
	s64 rc;
	__be64 evt = 0;

	/*
	 * Check if an HMI event is available.
	 * If yes, call opal_poll_events to pull opal messages and
	 * process them.
	 */
	if (!local_paca->hmi_event_available)
		return 0;

	local_paca->hmi_event_available = 0;
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));

	return 1;
}

static uint64_t find_recovery_address(uint64_t nip)
{
	int i;

	for (i = 0; i < mc_recoverable_range_len; i++)
		if ((nip >= mc_recoverable_range[i].start_addr) &&
		    (nip < mc_recoverable_range[i].end_addr))
			return mc_recoverable_range[i].recover_addr;
	return 0;
}
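
/*
 * If the machine check hit while executing inside OPAL firmware text
 * (opal.base .. opal.base + opal.size), look the NIP up in the
 * firmware-supplied recoverable ranges; on a match, redirect regs->nip
 * to the fixup address so the rfi resumes there.
 */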
bool opal_mce_check_early_recovery(struct pt_regs *regs)
{
	uint64_t recover_addr = 0;

	if (!opal.base || !opal.size)
		goto out;

	if ((regs->nip >= opal.base) &&
	    (regs->nip <= (opal.base + opal.size)))
		recover_addr = find_recovery_address(regs->nip);

	/*
	 * Setup regs->nip to rfi into fixup address.
	 */
	if (recover_addr)
		regs->nip = recover_addr;

out:
	return !!recover_addr;
}

static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

	opal_handle_interrupt(virq_to_hw(irq), &events);

	opal_do_notifier(be64_to_cpu(events));

	return IRQ_HANDLED;
}

static int opal_sysfs_init(void)
{
	opal_kobj = kobject_create_and_add("opal", firmware_kobj);
	if (!opal_kobj) {
		pr_warn("kobject_create_and_add opal failed\n");
		return -ENOMEM;
	}

	return 0;
}

static void __init opal_dump_region_init(void)
{
	void *addr;
	uint64_t size;
	int rc;

	/* Register kernel log buffer */
	addr = log_buf_addr_get();
	size = log_buf_len_get();
	rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
				       __pa(addr), size);
	/* Don't warn if this is just an older OPAL that doesn't
	 * know about that call
	 */
	if (rc && rc != OPAL_UNSUPPORTED)
		pr_warn("DUMP: Failed to register kernel log buffer. "
			"rc = %d\n", rc);
}

static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	const __be32 *irqs;
	int rc, i, irqlen;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("opal: Node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Find all OPAL interrupts and request them */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	pr_debug("opal: Found %d interrupts reserved for OPAL\n",
		 irqs ? (irqlen / 4) : 0);
	/* irqlen is only valid when the property exists */
	opal_irq_count = irqs ? (irqlen / 4) : 0;
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
		unsigned int hwirq = be32_to_cpup(irqs);
		unsigned int irq = irq_create_mapping(NULL, hwirq);
		if (irq == NO_IRQ) {
			pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
			continue;
		}
		rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
		if (rc)
			pr_warning("opal: Error %d requesting irq %d"
				   " (0x%x)\n", rc, irq, hwirq);
		opal_irqs[i] = irq;
	}

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	return 0;
}
machine_subsys_initcall(powernv, opal_init);

void opal_shutdown(void)
{
	unsigned int i;
	long rc = OPAL_BUSY;

	/* First free interrupts, which will also mask them */
	for (i = 0; i < opal_irq_count; i++) {
		if (opal_irqs[i])
			free_irq(opal_irqs[i], NULL);
		opal_irqs[i] = 0;
	}

	/*
	 * Then sync with OPAL, which ensures that anything that can
	 * potentially write to our memory has completed, such
	 * as an ongoing dump retrieval
	 */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_sync_host_reboot();
		/* On OPAL_BUSY_EVENT, poll events to let firmware progress */
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}

	/* Unregister memory dump region */
	opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}

/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);

/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}
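
/*
 * Usage sketch (illustrative): callers such as the code update path
 * typically wrap a vmalloc'ed buffer and hand the list to firmware,
 * then free it afterwards:
 *
 *	struct opal_sg_list *list;
 *
 *	list = opal_vmalloc_to_sg_list(image_data, image_size);
 *	if (list)
 *		;	// e.g. pass __pa(list) to an OPAL call
 *	opal_free_sg_list(list);
 */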

void opal_free_sg_list(struct opal_sg_list *sg)
{
	while (sg) {
		uint64_t next = be64_to_cpu(sg->next);

		kfree(sg);

		if (next)
			sg = __va(next);
		else
			sg = NULL;
	}
}