/*
 * PowerNV OPAL high level interfaces
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "opal: " fmt

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>

#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>

#include "powernv.h"

/* /sys/firmware/opal */
struct kobject *opal_kobj;

struct opal {
	u64 base;
	u64 entry;
	u64 size;
} opal;

struct mcheck_recoverable_range {
	u64 start_addr;
	u64 end_addr;
	u64 recover_addr;
};

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
static unsigned int *opal_irqs;
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);

static void opal_reinit_cores(void)
{
	/* Do the actual re-init. This will clobber all FPRs, VRs, etc...
	 *
	 * It will preserve non volatile GPRs and HSPRG0/1. It will
	 * also restore HIDs and other SPRs to their original value
	 * but it might clobber a bunch.
	 */
#ifdef __BIG_ENDIAN__
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
#else
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
#endif
}

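/*
 * Flat device-tree scan callback: locate the "ibm,opal" node and record
 * the OPAL base, entry point and runtime size. Returning 1 tells
 * of_scan_flat_dt() to stop scanning.
 */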
int __init early_init_dt_scan_opal(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const void *basep, *entryp, *sizep;
	int basesz, entrysz, runtimesz;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	basep  = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
	entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
	sizep  = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);

	if (!basep || !entryp || !sizep)
		return 1;

	opal.base  = of_read_number(basep, basesz/4);
	opal.entry = of_read_number(entryp, entrysz/4);
	opal.size  = of_read_number(sizep, runtimesz/4);

	pr_debug("OPAL Base  = 0x%llx (basep=%p basesz=%d)\n",
		 opal.base, basep, basesz);
	pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%d)\n",
		 opal.entry, entryp, entrysz);
	pr_debug("OPAL Size  = 0x%llx (sizep=%p runtimesz=%d)\n",
		 opal.size, sizep, runtimesz);

	powerpc_firmware_features |= FW_FEATURE_OPAL;
	if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		powerpc_firmware_features |= FW_FEATURE_OPALv3;
		pr_info("OPAL V3 detected!\n");
	} else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		pr_info("OPAL V2 detected!\n");
	} else {
		pr_info("OPAL V1 detected!\n");
	}

	/* Reinit all cores with the right endian */
	opal_reinit_cores();

	/* Restore some bits */
	if (cur_cpu_spec->cpu_restore)
		cur_cpu_spec->cpu_restore();

	return 1;
}

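/*
 * Flat device-tree scan callback: parse the "mcheck-recoverable-ranges"
 * property into mc_recoverable_range[], which the real-mode machine
 * check path consults via find_recovery_address() further below.
 */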
int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				const char *uname, int depth, void *data)
{
	int i, psize, size;
	const __be32 *prop;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);
	if (!prop)
		return 1;

	pr_debug("Found machine check recoverable ranges.\n");

	/*
	 * Calculate number of available entries.
	 *
	 * Each recoverable address range entry is (start address, len,
	 * recovery address), 2 cells each for start and recovery address,
	 * 1 cell for len, totalling 5 cells per entry.
	 */
	mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

	/* Sanity check */
	if (!mc_recoverable_range_len)
		return 1;

	/* Size required to hold all the entries. */
	size = mc_recoverable_range_len *
			sizeof(struct mcheck_recoverable_range);

	/*
	 * Allocate a buffer to hold the MC recoverable ranges. We would be
	 * accessing them in real mode, hence it needs to be within
	 * RMO region.
	 */
	mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64),
							ppc64_rma_size));
	memset(mc_recoverable_range, 0, size);

	for (i = 0; i < mc_recoverable_range_len; i++) {
		mc_recoverable_range[i].start_addr =
					of_read_number(prop + (i * 5) + 0, 2);
		mc_recoverable_range[i].end_addr =
					mc_recoverable_range[i].start_addr +
					of_read_number(prop + (i * 5) + 2, 1);
		mc_recoverable_range[i].recover_addr =
					of_read_number(prop + (i * 5) + 3, 2);

		pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
			 mc_recoverable_range[i].start_addr,
			 mc_recoverable_range[i].end_addr,
			 mc_recoverable_range[i].recover_addr);
	}
	return 1;
}

static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
	u64 glue;

	if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
		return -ENODEV;

	/* Hookup some exception handlers except machine check. We use the
	 * fwnmi area at 0x7000 to provide the glue space to OPAL
	 */
	glue = 0x7000;

	/*
	 * Check if we are running on newer firmware that exports
	 * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch
	 * the HMI interrupt; we catch it directly in Linux.
	 *
	 * For older firmware (i.e. currently released POWER8 System Firmware
	 * as of today, <= SV810_087), we fall back to the old behavior and
	 * let OPAL patch the HMI vector and handle it inside OPAL firmware.
	 *
	 * For newer firmware (in development/yet to be released) we will
	 * start catching/handling HMIs directly in Linux.
	 */
	if (!opal_check_token(OPAL_HANDLE_HMI)) {
		pr_info("opal: Old firmware detected, OPAL handles HMIs.\n");
		opal_register_exception_handler(
				OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
				0, glue);
		glue += 128;
	}

	opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

	return 0;
}
machine_early_initcall(powernv, opal_register_exception_handlers);

int opal_notifier_register(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_register(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_register);

int opal_notifier_unregister(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_unregister(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_unregister);

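/*
 * Fan an event word out to the notifier chain. Skipped entirely while
 * opal_notifier_hold is set (see opal_notifier_disable()); pending
 * events are picked up again by opal_notifier_enable().
 */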
static void opal_do_notifier(uint64_t events)
{
	unsigned long flags;
	uint64_t changed_mask;

	if (atomic_read(&opal_notifier_hold))
		return;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	changed_mask = last_notified_mask ^ events;
	last_notified_mask = events;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);

	/*
	 * Pass both the current event bits and the changed bits so
	 * the callbacks have enough information.
	 */
	atomic_notifier_call_chain(&opal_notifier_head,
				   events, (void *)changed_mask);
}

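/*
 * Adjust bits in the last-notified mask without running the chain, so
 * that a subsequent poll re-reports those events as changed.
 */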
void opal_notifier_update_evt(uint64_t evt_mask,
			      uint64_t evt_val)
{
	unsigned long flags;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	last_notified_mask &= ~evt_mask;
	last_notified_mask |= evt_val;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);
}

void opal_notifier_enable(void)
{
	int64_t rc;
	__be64 evt = 0;

	atomic_set(&opal_notifier_hold, 0);

	/* Process pending events */
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));
}

void opal_notifier_disable(void)
{
	atomic_set(&opal_notifier_hold, 1);
}

/*
 * Opal message notifier based on message type. Allow subscribers to get
 * notified for a specific message type.
 */
int opal_message_notifier_register(enum OpalMessageType msg_type,
				   struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}
	/* msg_type indexes opal_msg_notifier_head[], so it must be < MAX */
	if (msg_type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Invalid message type argument (%d)\n",
			   __func__, msg_type);
		return -EINVAL;
	}

	return atomic_notifier_chain_register(
				&opal_msg_notifier_head[msg_type], nb);
}

static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
	/* notify subscribers */
	atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
				   msg_type, msg);
}

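/*
 * Pull one pending message out of firmware with opal_get_msg() and fan
 * it out to the subscribers registered for its type.
 */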
static void opal_handle_message(void)
{
	s64 ret;
	/*
	 * TODO: pre-allocate a message buffer depending on opal-msg-size
	 * value in /proc/device-tree.
	 */
	static struct opal_msg msg;
	u32 type;

	ret = opal_get_msg(__pa(&msg), sizeof(msg));
	/* No opal message pending. */
	if (ret == OPAL_RESOURCE)
		return;

	/* check for errors. */
	if (ret) {
		pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
			   __func__, ret);
		return;
	}

	type = be32_to_cpu(msg.msg_type);

	/* Sanity check: the type must index opal_msg_notifier_head[] */
	if (type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)&msg);
}

static int opal_message_notify(struct notifier_block *nb,
			       unsigned long events, void *change)
{
	if (events & OPAL_EVENT_MSG_PENDING)
		opal_handle_message();
	return 0;
}

static struct notifier_block opal_message_nb = {
	.notifier_call	= opal_message_notify,
	.next		= NULL,
	.priority	= 0,
};

static int __init opal_message_init(void)
{
	int ret, i;

	for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);

	ret = opal_notifier_register(&opal_message_nb);
	if (ret) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
		       __func__, ret);
		return ret;
	}
	return 0;
}
machine_early_initcall(powernv, opal_message_init);

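/*
 * Console read path: returns the number of bytes placed in buf, 0 when
 * no console input is pending, or -ENODEV before OPAL is set up.
 */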
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = cpu_to_be64(count);
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}

int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on opal v1, so we just assume there is
	 * enough room and be done with it.
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while (total_len > 0 && (rc == OPAL_BUSY ||
				 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);

		/* Closed or other error drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need that for the console to
		 * flush when there aren't any interrupts. We will clean
		 * things up a bit later to limit that to synchronous paths
		 * such as the kernel console and xmon/udbg.
		 */
		do
			opal_poll_events(&evt);
		while (rc == OPAL_SUCCESS &&
		       (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}

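/*
 * Decide whether a machine check can be recovered from: returns 1 when
 * the platform already corrected the error or the affected user task
 * was killed, 0 when the kernel itself may be damaged.
 */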
static int opal_recover_mce(struct pt_regs *regs,
			    struct machine_check_event *evt)
{
	int recovered = 0;
	uint64_t ea = get_mce_fault_addr(evt);

	if (!(regs->msr & MSR_RI)) {
		/* If MSR_RI isn't set, we cannot recover */
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;
	} else if (ea && !is_kernel_addr(ea)) {
		/*
		 * Faulting address is not in kernel text. We should be fine.
		 * We need to find which process uses this address.
		 * For now, kill the task if we have received exception when
		 * in userspace.
		 *
		 * TODO: Queue up this address for hwpoisoning later.
		 */
		if (user_mode(regs) && !is_global_init(current)) {
			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
			recovered = 1;
		} else
			recovered = 0;
	} else if (user_mode(regs) && !is_global_init(current) &&
		   evt->severity == MCE_SEV_ERROR_SYNC) {
		/*
		 * If we have received a synchronous error when in userspace
		 * kill the task.
		 */
		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
		recovered = 1;
	}
	return recovered;
}

int opal_machine_check(struct pt_regs *regs)
{
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return 0;

	/* Print things out */
	if (evt.version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d!\n",
		       evt.version);
		return 0;
	}
	machine_check_print_event_info(&evt);

	if (opal_recover_mce(regs, &evt))
		return 1;
	return 0;
}

/* Early HMI handler called in real mode. */
int opal_hmi_exception_early(struct pt_regs *regs)
{
	s64 rc;

	/*
	 * Call the OPAL HMI handler. A return value of OPAL_SUCCESS
	 * indicates there is an HMI event waiting to be pulled by Linux.
	 */
	rc = opal_handle_hmi();
	if (rc == OPAL_SUCCESS) {
		local_paca->hmi_event_available = 1;
		return 1;
	}
	return 0;
}

/* HMI exception handler called in virtual mode during check_irq_replay. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
	s64 rc;
	__be64 evt = 0;

	/*
	 * Check if an HMI event is available. If yes, call
	 * opal_poll_events() to pull OPAL messages and process them.
	 */
	if (!local_paca->hmi_event_available)
		return 0;

	local_paca->hmi_event_available = 0;
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));

	return 1;
}

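/*
 * Look up the firmware-provided fixup address for an instruction
 * pointer that faulted inside the OPAL image; returns 0 when no
 * recoverable range covers it.
 */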
static uint64_t find_recovery_address(uint64_t nip)
{
	int i;

	for (i = 0; i < mc_recoverable_range_len; i++)
		if ((nip >= mc_recoverable_range[i].start_addr) &&
		    (nip < mc_recoverable_range[i].end_addr))
			return mc_recoverable_range[i].recover_addr;
	return 0;
}

bool opal_mce_check_early_recovery(struct pt_regs *regs)
{
	uint64_t recover_addr = 0;

	if (!opal.base || !opal.size)
		goto out;

	if ((regs->nip >= opal.base) &&
	    (regs->nip <= (opal.base + opal.size)))
		recover_addr = find_recovery_address(regs->nip);

	/*
	 * Setup regs->nip to rfi into fixup address.
	 */
	if (recover_addr)
		regs->nip = recover_addr;

out:
	return !!recover_addr;
}

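/*
 * Handler for the interrupts OPAL reserves for itself: let firmware
 * process the interrupt, then feed the returned event bits into the
 * notifier chain.
 */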
static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

	opal_handle_interrupt(virq_to_hw(irq), &events);

	opal_do_notifier(be64_to_cpu(events));

	return IRQ_HANDLED;
}

static int opal_sysfs_init(void)
{
	opal_kobj = kobject_create_and_add("opal", firmware_kobj);
	if (!opal_kobj) {
		pr_warn("kobject_create_and_add opal failed\n");
		return -ENOMEM;
	}

	return 0;
}

static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	return memory_read_from_buffer(buf, count, &off, bin_attr->private,
				       bin_attr->size);
}

static BIN_ATTR_RO(symbol_map, 0);

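/*
 * Expose the firmware symbol map described by the "symbol-map" device
 * tree property (a physical address and a size) to userspace as
 * /sys/firmware/opal/symbol_map.
 */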
static void opal_export_symmap(void)
{
	const __be64 *syms;
	unsigned int size;
	struct device_node *fw;
	int rc;

	fw = of_find_node_by_path("/ibm,opal/firmware");
	if (!fw)
		return;
	syms = of_get_property(fw, "symbol-map", &size);
	if (!syms || size != 2 * sizeof(__be64))
		return;

	/* Setup attributes */
	bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
	bin_attr_symbol_map.size = be64_to_cpu(syms[1]);

	rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
	if (rc)
		pr_warn("Error %d creating OPAL symbols file\n", rc);
}

static void __init opal_dump_region_init(void)
{
	void *addr;
	uint64_t size;
	int rc;

	/* Register kernel log buffer */
	addr = log_buf_addr_get();
	size = log_buf_len_get();
	rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
				       __pa(addr), size);
	/* Don't warn if this is just an older OPAL that doesn't
	 * know about that call
	 */
	if (rc && rc != OPAL_UNSUPPORTED)
		pr_warn("DUMP: Failed to register kernel log buffer. "
			"rc = %d\n", rc);
}

static void opal_ipmi_init(struct device_node *opal_node)
{
	struct device_node *np;

	for_each_child_of_node(opal_node, np)
		if (of_device_is_compatible(np, "ibm,opal-ipmi"))
			of_platform_device_create(np, NULL, NULL);
}

static void opal_i2c_create_devs(void)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, "ibm,opal-i2c")
		of_platform_device_create(np, NULL, NULL);
}

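/*
 * Main OPAL platform init: create console and i2c platform devices,
 * map and request the interrupts firmware reserves for itself, and
 * register the sysfs and dump/log interfaces built on OPAL calls.
 */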
static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	const __be32 *irqs;
	int rc, i, irqlen;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("opal: Node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Create i2c platform devices */
	opal_i2c_create_devs();

	/* Find all OPAL interrupts and request them */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	pr_debug("opal: Found %d interrupts reserved for OPAL\n",
		 irqs ? (irqlen / 4) : 0);
	/* irqlen is only valid when the property exists */
	opal_irq_count = irqs ? (irqlen / 4) : 0;
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
		unsigned int hwirq = be32_to_cpup(irqs);
		unsigned int irq = irq_create_mapping(NULL, hwirq);
		if (irq == NO_IRQ) {
			pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
			continue;
		}
		rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
		if (rc)
			pr_warning("opal: Error %d requesting irq %d"
				   " (0x%x)\n", rc, irq, hwirq);
		opal_irqs[i] = irq;
	}

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Export symbol map to userspace */
		opal_export_symmap();
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	opal_ipmi_init(opal_node);

	return 0;
}
machine_subsys_initcall(powernv, opal_init);

void opal_shutdown(void)
{
	unsigned int i;
	long rc = OPAL_BUSY;

	/* First free interrupts, which will also mask them */
	for (i = 0; i < opal_irq_count; i++) {
		if (opal_irqs[i])
			free_irq(opal_irqs[i], NULL);
		opal_irqs[i] = 0;
	}

	/*
	 * Then sync with OPAL, which ensures anything that can
	 * potentially write to our memory, such as an ongoing dump
	 * retrieval, has completed.
	 */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_sync_host_reboot();
		if (rc == OPAL_BUSY)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}

	/* Unregister memory dump region */
	opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}

/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);

EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);

/* Convert a region of vmalloc memory to an opal sg list */
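/*
 * Each list node fills one page: a 16-byte header (length, next)
 * followed by opal_sg_entry slots. "length" records the bytes used in
 * the node, header included, and "next" chains nodes by physical
 * address.
 */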
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}

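/*
 * Free a list built by opal_vmalloc_to_sg_list(), following the
 * physical "next" pointers back to virtual addresses with __va().
 */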
void opal_free_sg_list(struct opal_sg_list *sg)
{
	while (sg) {
		uint64_t next = be64_to_cpu(sg->next);

		kfree(sg);

		if (next)
			sg = __va(next);
		else
			sg = NULL;
	}
}

EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
EXPORT_SYMBOL_GPL(opal_i2c_request);