opal.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972
  1. /*
  2. * PowerNV OPAL high level interfaces
  3. *
  4. * Copyright 2011 IBM Corp.
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #define pr_fmt(fmt) "opal: " fmt
  12. #include <linux/printk.h>
  13. #include <linux/types.h>
  14. #include <linux/of.h>
  15. #include <linux/of_fdt.h>
  16. #include <linux/of_platform.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/notifier.h>
  19. #include <linux/slab.h>
  20. #include <linux/sched.h>
  21. #include <linux/kobject.h>
  22. #include <linux/delay.h>
  23. #include <linux/memblock.h>
  24. #include <linux/kthread.h>
  25. #include <linux/freezer.h>
  26. #include <asm/machdep.h>
  27. #include <asm/opal.h>
  28. #include <asm/firmware.h>
  29. #include <asm/mce.h>
  30. #include "powernv.h"
/* /sys/firmware/opal */
struct kobject *opal_kobj;

/* Location of the OPAL firmware image, parsed from the flat device tree. */
struct opal {
	u64 base;
	u64 entry;
	u64 size;
} opal;

/*
 * One machine-check recoverable range: NIPs in [start_addr, end_addr)
 * may be recovered by branching to recover_addr (see
 * opal_mce_check_early_recovery()).
 */
struct mcheck_recoverable_range {
	u64 start_addr;
	u64 end_addr;
	u64 recover_addr;
};

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);	/* serialises console writes */
static unsigned int *opal_irqs;			/* cached virqs, freed at shutdown */
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;	/* event bits last reported */
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);	/* non-zero: drop events */
static uint32_t opal_heartbeat;			/* poll period in ms, 0 = disabled */
  55. static void opal_reinit_cores(void)
  56. {
  57. /* Do the actual re-init, This will clobber all FPRs, VRs, etc...
  58. *
  59. * It will preserve non volatile GPRs and HSPRG0/1. It will
  60. * also restore HIDs and other SPRs to their original value
  61. * but it might clobber a bunch.
  62. */
  63. #ifdef __BIG_ENDIAN__
  64. opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
  65. #else
  66. opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
  67. #endif
  68. }
  69. int __init early_init_dt_scan_opal(unsigned long node,
  70. const char *uname, int depth, void *data)
  71. {
  72. const void *basep, *entryp, *sizep;
  73. int basesz, entrysz, runtimesz;
  74. if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
  75. return 0;
  76. basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
  77. entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
  78. sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);
  79. if (!basep || !entryp || !sizep)
  80. return 1;
  81. opal.base = of_read_number(basep, basesz/4);
  82. opal.entry = of_read_number(entryp, entrysz/4);
  83. opal.size = of_read_number(sizep, runtimesz/4);
  84. pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%d)\n",
  85. opal.base, basep, basesz);
  86. pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%d)\n",
  87. opal.entry, entryp, entrysz);
  88. pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
  89. opal.size, sizep, runtimesz);
  90. powerpc_firmware_features |= FW_FEATURE_OPAL;
  91. if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
  92. powerpc_firmware_features |= FW_FEATURE_OPALv2;
  93. powerpc_firmware_features |= FW_FEATURE_OPALv3;
  94. pr_info("OPAL V3 detected !\n");
  95. } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
  96. powerpc_firmware_features |= FW_FEATURE_OPALv2;
  97. pr_info("OPAL V2 detected !\n");
  98. } else {
  99. pr_info("OPAL V1 detected !\n");
  100. }
  101. /* Reinit all cores with the right endian */
  102. opal_reinit_cores();
  103. /* Restore some bits */
  104. if (cur_cpu_spec->cpu_restore)
  105. cur_cpu_spec->cpu_restore();
  106. return 1;
  107. }
/*
 * Flat-DT scan hook: parse "mcheck-recoverable-ranges" from the
 * "ibm,opal" node into mc_recoverable_range[] for the real-mode MCE
 * handler. Returns 1 once the ibm,opal node has been seen (stops the
 * scan), 0 to keep scanning other nodes.
 */
int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				const char *uname, int depth, void *data)
{
	int i, psize, size;
	const __be32 *prop;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);
	if (!prop)
		return 1;

	pr_debug("Found machine check recoverable ranges.\n");

	/*
	 * Calculate number of available entries.
	 *
	 * Each recoverable address range entry is (start address, len,
	 * recovery address), 2 cells each for start and recovery address,
	 * 1 cell for len, totalling 5 cells per entry.
	 */
	mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

	/* Sanity check */
	if (!mc_recoverable_range_len)
		return 1;

	/* Size required to hold all the entries. */
	size = mc_recoverable_range_len *
			sizeof(struct mcheck_recoverable_range);

	/*
	 * Allocate a buffer to hold the MC recoverable ranges. We would be
	 * accessing them in real mode, hence it needs to be within
	 * RMO region.
	 */
	mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64),
							ppc64_rma_size));
	memset(mc_recoverable_range, 0, size);

	for (i = 0; i < mc_recoverable_range_len; i++) {
		mc_recoverable_range[i].start_addr =
					of_read_number(prop + (i * 5) + 0, 2);
		/* end = start + len; len is the single middle cell */
		mc_recoverable_range[i].end_addr =
					mc_recoverable_range[i].start_addr +
					of_read_number(prop + (i * 5) + 2, 1);
		mc_recoverable_range[i].recover_addr =
					of_read_number(prop + (i * 5) + 3, 2);

		pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
				mc_recoverable_range[i].start_addr,
				mc_recoverable_range[i].end_addr,
				mc_recoverable_range[i].recover_addr);
	}
	return 1;
}
/*
 * Ask OPAL to patch selected exception vectors to branch into firmware
 * glue stubs placed in the fwnmi area at 0x7000. Big-endian builds
 * only; on LE this is compiled out and the function just returns 0.
 */
static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
	u64 glue;

	if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
		return -ENODEV;

	/* Hookup some exception handlers except machine check. We use the
	 * fwnmi area at 0x7000 to provide the glue space to OPAL
	 */
	glue = 0x7000;

	/*
	 * Check if we are running on newer firmware that exports
	 * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch
	 * the HMI interrupt and we catch it directly in Linux.
	 *
	 * For older firmware (i.e currently released POWER8 System Firmware
	 * as of today <= SV810_087), we fallback to old behavior and let OPAL
	 * patch the HMI vector and handle it inside OPAL firmware.
	 *
	 * For newer firmware (in development/yet to be released) we will
	 * start catching/handling HMI directly in Linux.
	 */
	if (!opal_check_token(OPAL_HANDLE_HMI)) {
		pr_info("Old firmware detected, OPAL handles HMIs.\n");
		opal_register_exception_handler(
				OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
				0, glue);
		glue += 128;	/* each glue stub occupies 128 bytes */
	}

	opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

	return 0;
}
machine_early_initcall(powernv, opal_register_exception_handlers);
  190. int opal_notifier_register(struct notifier_block *nb)
  191. {
  192. if (!nb) {
  193. pr_warning("%s: Invalid argument (%p)\n",
  194. __func__, nb);
  195. return -EINVAL;
  196. }
  197. atomic_notifier_chain_register(&opal_notifier_head, nb);
  198. return 0;
  199. }
  200. EXPORT_SYMBOL_GPL(opal_notifier_register);
  201. int opal_notifier_unregister(struct notifier_block *nb)
  202. {
  203. if (!nb) {
  204. pr_warning("%s: Invalid argument (%p)\n",
  205. __func__, nb);
  206. return -EINVAL;
  207. }
  208. atomic_notifier_chain_unregister(&opal_notifier_head, nb);
  209. return 0;
  210. }
  211. EXPORT_SYMBOL_GPL(opal_notifier_unregister);
  212. static void opal_do_notifier(uint64_t events)
  213. {
  214. unsigned long flags;
  215. uint64_t changed_mask;
  216. if (atomic_read(&opal_notifier_hold))
  217. return;
  218. spin_lock_irqsave(&opal_notifier_lock, flags);
  219. changed_mask = last_notified_mask ^ events;
  220. last_notified_mask = events;
  221. spin_unlock_irqrestore(&opal_notifier_lock, flags);
  222. /*
  223. * We feed with the event bits and changed bits for
  224. * enough information to the callback.
  225. */
  226. atomic_notifier_call_chain(&opal_notifier_head,
  227. events, (void *)changed_mask);
  228. }
  229. void opal_notifier_update_evt(uint64_t evt_mask,
  230. uint64_t evt_val)
  231. {
  232. unsigned long flags;
  233. spin_lock_irqsave(&opal_notifier_lock, flags);
  234. last_notified_mask &= ~evt_mask;
  235. last_notified_mask |= evt_val;
  236. spin_unlock_irqrestore(&opal_notifier_lock, flags);
  237. }
  238. void opal_notifier_enable(void)
  239. {
  240. int64_t rc;
  241. __be64 evt = 0;
  242. atomic_set(&opal_notifier_hold, 0);
  243. /* Process pending events */
  244. rc = opal_poll_events(&evt);
  245. if (rc == OPAL_SUCCESS && evt)
  246. opal_do_notifier(be64_to_cpu(evt));
  247. }
/* Raise the hold flag so opal_do_notifier() drops events until re-enabled. */
void opal_notifier_disable(void)
{
	atomic_set(&opal_notifier_hold, 1);
}
/*
 * Opal message notifier based on message type. Allow subscribers to get
 * notified for specific message type.
 */
  256. int opal_message_notifier_register(enum opal_msg_type msg_type,
  257. struct notifier_block *nb)
  258. {
  259. if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
  260. pr_warning("%s: Invalid arguments, msg_type:%d\n",
  261. __func__, msg_type);
  262. return -EINVAL;
  263. }
  264. return atomic_notifier_chain_register(
  265. &opal_msg_notifier_head[msg_type], nb);
  266. }
  267. int opal_message_notifier_unregister(enum opal_msg_type msg_type,
  268. struct notifier_block *nb)
  269. {
  270. return atomic_notifier_chain_unregister(
  271. &opal_msg_notifier_head[msg_type], nb);
  272. }
/* Fan a firmware message out to the notifier chain for its type. */
static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
	/* notify subscribers */
	atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
					msg_type, msg);
}
  279. static void opal_handle_message(void)
  280. {
  281. s64 ret;
  282. /*
  283. * TODO: pre-allocate a message buffer depending on opal-msg-size
  284. * value in /proc/device-tree.
  285. */
  286. static struct opal_msg msg;
  287. u32 type;
  288. ret = opal_get_msg(__pa(&msg), sizeof(msg));
  289. /* No opal message pending. */
  290. if (ret == OPAL_RESOURCE)
  291. return;
  292. /* check for errors. */
  293. if (ret) {
  294. pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
  295. __func__, ret);
  296. return;
  297. }
  298. type = be32_to_cpu(msg.msg_type);
  299. /* Sanity check */
  300. if (type >= OPAL_MSG_TYPE_MAX) {
  301. pr_warning("%s: Unknown message type: %u\n", __func__, type);
  302. return;
  303. }
  304. opal_message_do_notify(type, (void *)&msg);
  305. }
  306. static int opal_message_notify(struct notifier_block *nb,
  307. unsigned long events, void *change)
  308. {
  309. if (events & OPAL_EVENT_MSG_PENDING)
  310. opal_handle_message();
  311. return 0;
  312. }
  313. static struct notifier_block opal_message_nb = {
  314. .notifier_call = opal_message_notify,
  315. .next = NULL,
  316. .priority = 0,
  317. };
  318. static int __init opal_message_init(void)
  319. {
  320. int ret, i;
  321. for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
  322. ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);
  323. ret = opal_notifier_register(&opal_message_nb);
  324. if (ret) {
  325. pr_err("%s: Can't register OPAL event notifier (%d)\n",
  326. __func__, ret);
  327. return ret;
  328. }
  329. return 0;
  330. }
  331. machine_early_initcall(powernv, opal_message_init);
/*
 * Read up to @count bytes from OPAL console @vtermno into @buf.
 * Returns the number of bytes read, 0 when no input is pending (or on
 * a read error), -ENODEV when OPAL is absent.
 */
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;

	/* Cheap check first: only call console_read when input is pending. */
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;

	len = cpu_to_be64(count);	/* in/out parameter, big-endian */
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}
/*
 * Write @total_len bytes from @data to OPAL console @vtermno.
 * Returns bytes written, -EAGAIN when the firmware buffer lacks room
 * (OPALv2 only), or -ENODEV when OPAL is absent. A closed console
 * reports @total_len so callers silently drop the data.
 */
int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on opal v1, so we just assume there is
	 * enough room and be done with it
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while (total_len > 0 && (rc == OPAL_BUSY ||
			rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);	/* bytes firmware consumed */

		/* Closed or other error drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need that for the console to
		 * flush when there aren't any interrupts. We will clean
		 * things a bit later to limit that to synchronous path
		 * such as the kernel console and xmon/udbg
		 */
		do
			opal_poll_events(&evt);
		while (rc == OPAL_SUCCESS &&
			(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}
/*
 * Decide whether a machine check is recoverable. Returns 1 when it is
 * safe to resume (possibly after killing the offending user task),
 * 0 when the kernel must not continue. Branch order matters: MSR_RI
 * is checked first since nothing is recoverable without it.
 */
static int opal_recover_mce(struct pt_regs *regs,
					struct machine_check_event *evt)
{
	int recovered = 0;
	uint64_t ea = get_mce_fault_addr(evt);

	if (!(regs->msr & MSR_RI)) {
		/* If MSR_RI isn't set, we cannot recover */
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;
	} else if (ea && !is_kernel_addr(ea)) {
		/*
		 * Faulting address is not in kernel text. We should be fine.
		 * We need to find which process uses this address.
		 * For now, kill the task if we have received exception when
		 * in userspace.
		 *
		 * TODO: Queue up this address for hwpoisioning later.
		 */
		if (user_mode(regs) && !is_global_init(current)) {
			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
			recovered = 1;
		} else
			recovered = 0;
	} else if (user_mode(regs) && !is_global_init(current) &&
		   evt->severity == MCE_SEV_ERROR_SYNC) {
		/*
		 * If we have received a synchronous error when in userspace
		 * kill the task.
		 */
		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
		recovered = 1;
	}
	return recovered;
}
  446. int opal_machine_check(struct pt_regs *regs)
  447. {
  448. struct machine_check_event evt;
  449. if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
  450. return 0;
  451. /* Print things out */
  452. if (evt.version != MCE_V1) {
  453. pr_err("Machine Check Exception, Unknown event version %d !\n",
  454. evt.version);
  455. return 0;
  456. }
  457. machine_check_print_event_info(&evt);
  458. if (opal_recover_mce(regs, &evt))
  459. return 1;
  460. return 0;
  461. }
  462. /* Early hmi handler called in real mode. */
  463. int opal_hmi_exception_early(struct pt_regs *regs)
  464. {
  465. s64 rc;
  466. /*
  467. * call opal hmi handler. Pass paca address as token.
  468. * The return value OPAL_SUCCESS is an indication that there is
  469. * an HMI event generated waiting to pull by Linux.
  470. */
  471. rc = opal_handle_hmi();
  472. if (rc == OPAL_SUCCESS) {
  473. local_paca->hmi_event_available = 1;
  474. return 1;
  475. }
  476. return 0;
  477. }
  478. /* HMI exception handler called in virtual mode during check_irq_replay. */
  479. int opal_handle_hmi_exception(struct pt_regs *regs)
  480. {
  481. s64 rc;
  482. __be64 evt = 0;
  483. /*
  484. * Check if HMI event is available.
  485. * if Yes, then call opal_poll_events to pull opal messages and
  486. * process them.
  487. */
  488. if (!local_paca->hmi_event_available)
  489. return 0;
  490. local_paca->hmi_event_available = 0;
  491. rc = opal_poll_events(&evt);
  492. if (rc == OPAL_SUCCESS && evt)
  493. opal_do_notifier(be64_to_cpu(evt));
  494. return 1;
  495. }
  496. static uint64_t find_recovery_address(uint64_t nip)
  497. {
  498. int i;
  499. for (i = 0; i < mc_recoverable_range_len; i++)
  500. if ((nip >= mc_recoverable_range[i].start_addr) &&
  501. (nip < mc_recoverable_range[i].end_addr))
  502. return mc_recoverable_range[i].recover_addr;
  503. return 0;
  504. }
  505. bool opal_mce_check_early_recovery(struct pt_regs *regs)
  506. {
  507. uint64_t recover_addr = 0;
  508. if (!opal.base || !opal.size)
  509. goto out;
  510. if ((regs->nip >= opal.base) &&
  511. (regs->nip <= (opal.base + opal.size)))
  512. recover_addr = find_recovery_address(regs->nip);
  513. /*
  514. * Setup regs->nip to rfi into fixup address.
  515. */
  516. if (recover_addr)
  517. regs->nip = recover_addr;
  518. out:
  519. return !!recover_addr;
  520. }
  521. static irqreturn_t opal_interrupt(int irq, void *data)
  522. {
  523. __be64 events;
  524. opal_handle_interrupt(virq_to_hw(irq), &events);
  525. opal_do_notifier(be64_to_cpu(events));
  526. return IRQ_HANDLED;
  527. }
  528. static int opal_sysfs_init(void)
  529. {
  530. opal_kobj = kobject_create_and_add("opal", firmware_kobj);
  531. if (!opal_kobj) {
  532. pr_warn("kobject_create_and_add opal failed\n");
  533. return -ENOMEM;
  534. }
  535. return 0;
  536. }
/*
 * sysfs read handler for /sys/firmware/opal/symbol_map: copies out of
 * the firmware symbol-map buffer cached in bin_attr->private.
 */
static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	return memory_read_from_buffer(buf, count, &off, bin_attr->private,
				       bin_attr->size);
}

/* Read-only attribute; private/size are filled in by opal_export_symmap(). */
static BIN_ATTR_RO(symbol_map, 0);
  545. static void opal_export_symmap(void)
  546. {
  547. const __be64 *syms;
  548. unsigned int size;
  549. struct device_node *fw;
  550. int rc;
  551. fw = of_find_node_by_path("/ibm,opal/firmware");
  552. if (!fw)
  553. return;
  554. syms = of_get_property(fw, "symbol-map", &size);
  555. if (!syms || size != 2 * sizeof(__be64))
  556. return;
  557. /* Setup attributes */
  558. bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
  559. bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
  560. rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
  561. if (rc)
  562. pr_warn("Error %d creating OPAL symbols file\n", rc);
  563. }
  564. static void __init opal_dump_region_init(void)
  565. {
  566. void *addr;
  567. uint64_t size;
  568. int rc;
  569. if (!opal_check_token(OPAL_REGISTER_DUMP_REGION))
  570. return;
  571. /* Register kernel log buffer */
  572. addr = log_buf_addr_get();
  573. if (addr == NULL)
  574. return;
  575. size = log_buf_len_get();
  576. if (size == 0)
  577. return;
  578. rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
  579. __pa(addr), size);
  580. /* Don't warn if this is just an older OPAL that doesn't
  581. * know about that call
  582. */
  583. if (rc && rc != OPAL_UNSUPPORTED)
  584. pr_warn("DUMP: Failed to register kernel log buffer. "
  585. "rc = %d\n", rc);
  586. }
  587. static void opal_flash_init(struct device_node *opal_node)
  588. {
  589. struct device_node *np;
  590. for_each_child_of_node(opal_node, np)
  591. if (of_device_is_compatible(np, "ibm,opal-flash"))
  592. of_platform_device_create(np, NULL, NULL);
  593. }
  594. static void opal_ipmi_init(struct device_node *opal_node)
  595. {
  596. struct device_node *np;
  597. for_each_child_of_node(opal_node, np)
  598. if (of_device_is_compatible(np, "ibm,opal-ipmi"))
  599. of_platform_device_create(np, NULL, NULL);
  600. }
  601. static void opal_i2c_create_devs(void)
  602. {
  603. struct device_node *np;
  604. for_each_compatible_node(np, NULL, "ibm,opal-i2c")
  605. of_platform_device_create(np, NULL, NULL);
  606. }
  607. static void __init opal_irq_init(struct device_node *dn)
  608. {
  609. const __be32 *irqs;
  610. int i, irqlen;
  611. /* Get interrupt property */
  612. irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
  613. opal_irq_count = irqs ? (irqlen / 4) : 0;
  614. pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count);
  615. if (!opal_irq_count)
  616. return;
  617. /* Install interrupt handlers */
  618. opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
  619. for (i = 0; irqs && i < opal_irq_count; i++, irqs++) {
  620. unsigned int irq, virq;
  621. int rc;
  622. /* Get hardware and virtual IRQ */
  623. irq = be32_to_cpup(irqs);
  624. virq = irq_create_mapping(NULL, irq);
  625. if (virq == NO_IRQ) {
  626. pr_warn("Failed to map irq 0x%x\n", irq);
  627. continue;
  628. }
  629. /* Install interrupt handler */
  630. rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
  631. if (rc) {
  632. irq_dispose_mapping(virq);
  633. pr_warn("Error %d requesting irq %d (0x%x)\n",
  634. rc, virq, irq);
  635. continue;
  636. }
  637. /* Cache IRQ */
  638. opal_irqs[i] = virq;
  639. }
  640. }
/*
 * OPAL heartbeat kthread: periodically polls OPAL for events so that
 * firmware work progresses even when no OPAL interrupt fires. Sleeps
 * opal_heartbeat milliseconds between polls; freezable for suspend.
 */
static int kopald(void *unused)
{
	set_freezable();
	do {
		try_to_freeze();
		opal_poll_events(NULL);
		msleep_interruptible(opal_heartbeat);
	} while (!kthread_should_stop());

	return 0;
}
  651. static void opal_init_heartbeat(void)
  652. {
  653. /* Old firwmware, we assume the HVC heartbeat is sufficient */
  654. if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
  655. &opal_heartbeat) != 0)
  656. opal_heartbeat = 0;
  657. if (opal_heartbeat)
  658. kthread_run(kopald, NULL, "kopald");
  659. }
/*
 * Main OPAL platform setup: locate the /ibm,opal node, create console,
 * i2c, IPMI and flash platform devices, request OPAL interrupts, and
 * expose the sysfs interfaces under /sys/firmware/opal.
 */
static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	int rc;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("Device node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		/* On v1 the serial nodes live directly under /ibm,opal */
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Create i2c platform devices */
	opal_i2c_create_devs();

	/* Setup a heartbeat thread if requested by OPAL */
	opal_init_heartbeat();

	/* Find all OPAL interrupts and request them */
	opal_irq_init(opal_node);

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Export symbol map to userspace */
		opal_export_symmap();
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* NOTE(review): rc from opal_elog_init() is never checked and
		 * opal_init() returns 0 regardless — confirm this is intended. */
		/* Setup code update interface */
		opal_flash_update_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	/* Initialize OPAL IPMI backend */
	opal_ipmi_init(opal_node);

	opal_flash_init(opal_node);

	return 0;
}
machine_subsys_initcall(powernv, opal_init);
  712. void opal_shutdown(void)
  713. {
  714. unsigned int i;
  715. long rc = OPAL_BUSY;
  716. /* First free interrupts, which will also mask them */
  717. for (i = 0; i < opal_irq_count; i++) {
  718. if (opal_irqs[i])
  719. free_irq(opal_irqs[i], NULL);
  720. opal_irqs[i] = 0;
  721. }
  722. /*
  723. * Then sync with OPAL which ensure anything that can
  724. * potentially write to our memory has completed such
  725. * as an ongoing dump retrieval
  726. */
  727. while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
  728. rc = opal_sync_host_reboot();
  729. if (rc == OPAL_BUSY)
  730. opal_poll_events(NULL);
  731. else
  732. mdelay(10);
  733. }
  734. /* Unregister memory dump region */
  735. if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION))
  736. opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
  737. }
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);

/* IPMI and flash call wrappers used by the respective modular drivers */
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);
EXPORT_SYMBOL_GPL(opal_flash_read);
EXPORT_SYMBOL_GPL(opal_flash_write);
EXPORT_SYMBOL_GPL(opal_flash_erase);
/*
 * Convert a region of vmalloc memory to an opal sg list. Each list
 * node is a single kzalloc'd page holding a header (length + physical
 * next pointer, 16 bytes) followed by (data, length) entries, all
 * stored big-endian for firmware consumption. Returns NULL on OOM.
 * The caller frees the list with opal_free_sg_list().
 */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		/* Physical address of the current vmalloc page. */
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		/* Node full: chain a fresh page via its physical address. */
		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			/* +16 accounts for the node header fields */
			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}
  782. void opal_free_sg_list(struct opal_sg_list *sg)
  783. {
  784. while (sg) {
  785. uint64_t next = be64_to_cpu(sg->next);
  786. kfree(sg);
  787. if (next)
  788. sg = __va(next);
  789. else
  790. sg = NULL;
  791. }
  792. }
/*
 * Translate an OPAL return code into a negative Linux errno (0 for
 * success). Unrecognised codes are logged and mapped to -EIO.
 * NOTE(review): OPAL_BUSY is not mapped to -EBUSY here (only
 * OPAL_BUSY_EVENT is) — confirm whether that is intentional.
 */
int opal_error_code(int rc)
{
	switch (rc) {
	case OPAL_SUCCESS: return 0;
	case OPAL_PARAMETER: return -EINVAL;
	case OPAL_ASYNC_COMPLETION: return -EINPROGRESS;
	case OPAL_BUSY_EVENT: return -EBUSY;
	case OPAL_NO_MEM: return -ENOMEM;
	case OPAL_UNSUPPORTED: return -EIO;
	case OPAL_HARDWARE: return -EIO;
	case OPAL_INTERNAL_ERROR: return -EIO;
	default:
		pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
		return -EIO;
	}
}
/* OPAL call wrappers used by modular drivers (RTC, TPO, I2C) */
EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
EXPORT_SYMBOL_GPL(opal_i2c_request);