opal.c

/*
 * PowerNV OPAL high level interfaces
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "opal: " fmt

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>

#include "powernv.h"

/* /sys/firmware/opal */
struct kobject *opal_kobj;

struct opal {
        u64 base;
        u64 entry;
        u64 size;
} opal;

struct mcheck_recoverable_range {
        u64 start_addr;
        u64 end_addr;
        u64 recover_addr;
};

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static uint32_t opal_heartbeat;

static void opal_reinit_cores(void)
{
        /* Do the actual re-init. This will clobber all FPRs, VRs, etc...
         *
         * It will preserve non volatile GPRs and HSPRG0/1. It will
         * also restore HIDs and other SPRs to their original value
         * but it might clobber a bunch.
         */
#ifdef __BIG_ENDIAN__
        opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
#else
        opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
#endif
}

int __init early_init_dt_scan_opal(unsigned long node,
                                   const char *uname, int depth, void *data)
{
        const void *basep, *entryp, *sizep;
        int basesz, entrysz, runtimesz;

        if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
                return 0;

        basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
        entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
        sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);

        if (!basep || !entryp || !sizep)
                return 1;

        opal.base = of_read_number(basep, basesz/4);
        opal.entry = of_read_number(entryp, entrysz/4);
        opal.size = of_read_number(sizep, runtimesz/4);

        pr_debug("OPAL Base  = 0x%llx (basep=%p basesz=%d)\n",
                 opal.base, basep, basesz);
        pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%d)\n",
                 opal.entry, entryp, entrysz);
        pr_debug("OPAL Size  = 0x%llx (sizep=%p runtimesz=%d)\n",
                 opal.size, sizep, runtimesz);

        powerpc_firmware_features |= FW_FEATURE_OPAL;
        if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
                powerpc_firmware_features |= FW_FEATURE_OPALv2;
                powerpc_firmware_features |= FW_FEATURE_OPALv3;
                pr_info("OPAL V3 detected !\n");
        } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
                powerpc_firmware_features |= FW_FEATURE_OPALv2;
                pr_info("OPAL V2 detected !\n");
        } else {
                pr_info("OPAL V1 detected !\n");
        }

        /* Reinit all cores with the right endian */
        opal_reinit_cores();

        /* Restore some bits */
        if (cur_cpu_spec->cpu_restore)
                cur_cpu_spec->cpu_restore();

        return 1;
}

int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
                                   const char *uname, int depth, void *data)
{
        int i, psize, size;
        const __be32 *prop;

        if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);

        if (!prop)
                return 1;

        pr_debug("Found machine check recoverable ranges.\n");

        /*
         * Calculate number of available entries.
         *
         * Each recoverable address range entry is (start address, len,
         * recovery address), 2 cells each for start and recovery address,
         * 1 cell for len, totalling 5 cells per entry.
         */
        mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

        /* Sanity check */
        if (!mc_recoverable_range_len)
                return 1;

        /* Size required to hold all the entries. */
        size = mc_recoverable_range_len *
                        sizeof(struct mcheck_recoverable_range);

        /*
         * Allocate a buffer to hold the MC recoverable ranges. We would be
         * accessing them in real mode, hence it needs to be within
         * RMO region.
         */
        mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64),
                                                        ppc64_rma_size));
        memset(mc_recoverable_range, 0, size);

        for (i = 0; i < mc_recoverable_range_len; i++) {
                mc_recoverable_range[i].start_addr =
                                        of_read_number(prop + (i * 5) + 0, 2);
                mc_recoverable_range[i].end_addr =
                                        mc_recoverable_range[i].start_addr +
                                        of_read_number(prop + (i * 5) + 2, 1);
                mc_recoverable_range[i].recover_addr =
                                        of_read_number(prop + (i * 5) + 3, 2);

                pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
                                mc_recoverable_range[i].start_addr,
                                mc_recoverable_range[i].end_addr,
                                mc_recoverable_range[i].recover_addr);
        }
        return 1;
}
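
/*
 * Illustrative example of the 5-cell-per-entry encoding parsed above
 * (the values here are made up, not taken from any real firmware):
 *
 *	mcheck-recoverable-ranges = <0x0 0x1000 0x200 0x0 0x3000
 *	                             0x0 0x5000 0x100 0x0 0x3000
 *	                             0x0 0x9000 0x080 0x0 0x3000>;
 *
 * would yield three entries; the first parses as start_addr = 0x1000,
 * end_addr = 0x1000 + 0x200 = 0x1200 and recover_addr = 0x3000.
 */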

static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
        u64 glue;

        if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
                return -ENODEV;

        /* Hookup some exception handlers except machine check. We use the
         * fwnmi area at 0x7000 to provide the glue space to OPAL.
         */
        glue = 0x7000;

        /*
         * Check if we are running on newer firmware that exports
         * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch
         * the HMI interrupt and we catch it directly in Linux.
         *
         * For older firmware (i.e. currently released POWER8 System Firmware
         * as of today <= SV810_087), we fall back to the old behavior and let
         * OPAL patch the HMI vector and handle it inside OPAL firmware.
         *
         * For newer firmware (in development/yet to be released) we will
         * start catching/handling HMI directly in Linux.
         */
        if (!opal_check_token(OPAL_HANDLE_HMI)) {
                pr_info("Old firmware detected, OPAL handles HMIs.\n");
                opal_register_exception_handler(
                                OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
                                0, glue);
                glue += 128;
        }

        opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

        return 0;
}
machine_early_initcall(powernv, opal_register_exception_handlers);

/*
 * Opal message notifier based on message type. Allows subscribers to be
 * notified of a specific message type.
 */
int opal_message_notifier_register(enum opal_msg_type msg_type,
                                   struct notifier_block *nb)
{
        if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
                pr_warning("%s: Invalid arguments, msg_type:%d\n",
                           __func__, msg_type);
                return -EINVAL;
        }

        return atomic_notifier_chain_register(
                                &opal_msg_notifier_head[msg_type], nb);
}
EXPORT_SYMBOL_GPL(opal_message_notifier_register);

int opal_message_notifier_unregister(enum opal_msg_type msg_type,
                                     struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(
                        &opal_msg_notifier_head[msg_type], nb);
}
EXPORT_SYMBOL_GPL(opal_message_notifier_unregister);
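
/*
 * Usage sketch for the notifier interface above (illustrative only; the
 * "foo" names are hypothetical and not part of this file). A subscriber
 * supplies a notifier_block whose callback receives the message type and
 * a pointer to the struct opal_msg:
 *
 *	static int foo_msg_notify(struct notifier_block *nb,
 *				  unsigned long msg_type, void *msg)
 *	{
 *		struct opal_msg *m = msg;
 *
 *		// inspect m->params[] as appropriate for msg_type
 *		return 0;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_msg_notify,
 *	};
 *
 *	opal_message_notifier_register(OPAL_MSG_ASYNC_COMP, &foo_nb);
 */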

static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
        /* notify subscribers */
        atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
                                   msg_type, msg);
}

static void opal_handle_message(void)
{
        s64 ret;
        /*
         * TODO: pre-allocate a message buffer depending on opal-msg-size
         * value in /proc/device-tree.
         */
        static struct opal_msg msg;
        u32 type;

        ret = opal_get_msg(__pa(&msg), sizeof(msg));
        /* No opal message pending. */
        if (ret == OPAL_RESOURCE)
                return;

        /* check for errors. */
        if (ret) {
                pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
                           __func__, ret);
                return;
        }

        type = be32_to_cpu(msg.msg_type);

        /* Sanity check */
        if (type >= OPAL_MSG_TYPE_MAX) {
                pr_warning("%s: Unknown message type: %u\n", __func__, type);
                return;
        }
        opal_message_do_notify(type, (void *)&msg);
}

static irqreturn_t opal_message_notify(int irq, void *data)
{
        opal_handle_message();
        return IRQ_HANDLED;
}

static int __init opal_message_init(void)
{
        int ret, i, irq;

        for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
                ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);

        irq = opal_event_request(ilog2(OPAL_EVENT_MSG_PENDING));
        if (!irq) {
                pr_err("%s: Can't register OPAL event irq (%d)\n",
                       __func__, irq);
                return irq;
        }

        ret = request_irq(irq, opal_message_notify,
                          IRQ_TYPE_LEVEL_HIGH, "opal-msg", NULL);
        if (ret) {
                pr_err("%s: Can't request OPAL event irq (%d)\n",
                       __func__, ret);
                return ret;
        }

        return 0;
}

int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
        s64 rc;
        __be64 evt, len;

        if (!opal.entry)
                return -ENODEV;
        opal_poll_events(&evt);
        if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
                return 0;
        len = cpu_to_be64(count);
        rc = opal_console_read(vtermno, &len, buf);
        if (rc == OPAL_SUCCESS)
                return be64_to_cpu(len);
        return 0;
}

int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
        int written = 0;
        __be64 olen;
        s64 len, rc;
        unsigned long flags;
        __be64 evt;

        if (!opal.entry)
                return -ENODEV;

        /* We want put_chars to be atomic to avoid mangling of hvsi
         * packets. To do that, we first test for room and return
         * -EAGAIN if there isn't enough.
         *
         * Unfortunately, opal_console_write_buffer_space() doesn't
         * appear to work on opal v1, so we just assume there is
         * enough room and be done with it.
         */
        spin_lock_irqsave(&opal_write_lock, flags);
        if (firmware_has_feature(FW_FEATURE_OPALv2)) {
                rc = opal_console_write_buffer_space(vtermno, &olen);
                len = be64_to_cpu(olen);
                if (rc || len < total_len) {
                        spin_unlock_irqrestore(&opal_write_lock, flags);
                        /* Closed -> drop characters */
                        if (rc)
                                return total_len;
                        opal_poll_events(NULL);
                        return -EAGAIN;
                }
        }

        /* We still try to handle partial completions, though they
         * should no longer happen.
         */
        rc = OPAL_BUSY;
        while (total_len > 0 && (rc == OPAL_BUSY ||
                                 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
                olen = cpu_to_be64(total_len);
                rc = opal_console_write(vtermno, &olen, data);
                len = be64_to_cpu(olen);

                /* Closed or other error drop */
                if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
                    rc != OPAL_BUSY_EVENT) {
                        written = total_len;
                        break;
                }
                if (rc == OPAL_SUCCESS) {
                        total_len -= len;
                        data += len;
                        written += len;
                }
                /* This is a bit nasty but we need that for the console to
                 * flush when there aren't any interrupts. We will clean
                 * things a bit later to limit that to synchronous path
                 * such as the kernel console and xmon/udbg.
                 */
                do
                        opal_poll_events(&evt);
                while (rc == OPAL_SUCCESS &&
                       (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
        }
        spin_unlock_irqrestore(&opal_write_lock, flags);
        return written;
}

static int opal_recover_mce(struct pt_regs *regs,
                            struct machine_check_event *evt)
{
        int recovered = 0;
        uint64_t ea = get_mce_fault_addr(evt);

        if (!(regs->msr & MSR_RI)) {
                /* If MSR_RI isn't set, we cannot recover */
                recovered = 0;
        } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
                /* Platform corrected itself */
                recovered = 1;
        } else if (ea && !is_kernel_addr(ea)) {
                /*
                 * Faulting address is not in kernel text. We should be fine.
                 * We need to find which process uses this address.
                 * For now, kill the task if we have received the exception
                 * in userspace.
                 *
                 * TODO: Queue up this address for hwpoisoning later.
                 */
                if (user_mode(regs) && !is_global_init(current)) {
                        _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
                        recovered = 1;
                } else
                        recovered = 0;
        } else if (user_mode(regs) && !is_global_init(current) &&
                   evt->severity == MCE_SEV_ERROR_SYNC) {
                /*
                 * If we have received a synchronous error when in userspace,
                 * kill the task.
                 */
                _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
                recovered = 1;
        }
        return recovered;
}

int opal_machine_check(struct pt_regs *regs)
{
        struct machine_check_event evt;

        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return 0;

        /* Print things out */
        if (evt.version != MCE_V1) {
                pr_err("Machine Check Exception, Unknown event version %d !\n",
                       evt.version);
                return 0;
        }
        machine_check_print_event_info(&evt);

        if (opal_recover_mce(regs, &evt))
                return 1;
        return 0;
}

/* Early HMI handler called in real mode. */
int opal_hmi_exception_early(struct pt_regs *regs)
{
        s64 rc;

        /*
         * Call the OPAL HMI handler. Pass the paca address as the token.
         * A return value of OPAL_SUCCESS indicates that there is an HMI
         * event generated waiting to be pulled by Linux.
         */
        rc = opal_handle_hmi();
        if (rc == OPAL_SUCCESS) {
                local_paca->hmi_event_available = 1;
                return 1;
        }
        return 0;
}

/* HMI exception handler called in virtual mode during check_irq_replay. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
        s64 rc;
        __be64 evt = 0;

        /*
         * Check if an HMI event is available.
         * If yes, call opal_poll_events() to pull opal messages and
         * process them.
         */
        if (!local_paca->hmi_event_available)
                return 0;

        local_paca->hmi_event_available = 0;
        rc = opal_poll_events(&evt);
        if (rc == OPAL_SUCCESS && evt)
                opal_handle_events(be64_to_cpu(evt));

        return 1;
}

static uint64_t find_recovery_address(uint64_t nip)
{
        int i;

        for (i = 0; i < mc_recoverable_range_len; i++)
                if ((nip >= mc_recoverable_range[i].start_addr) &&
                    (nip < mc_recoverable_range[i].end_addr))
                        return mc_recoverable_range[i].recover_addr;
        return 0;
}

bool opal_mce_check_early_recovery(struct pt_regs *regs)
{
        uint64_t recover_addr = 0;

        if (!opal.base || !opal.size)
                goto out;

        if ((regs->nip >= opal.base) &&
            (regs->nip <= (opal.base + opal.size)))
                recover_addr = find_recovery_address(regs->nip);

        /*
         * Setup regs->nip to rfi into fixup address.
         */
        if (recover_addr)
                regs->nip = recover_addr;

out:
        return !!recover_addr;
}

static int opal_sysfs_init(void)
{
        opal_kobj = kobject_create_and_add("opal", firmware_kobj);
        if (!opal_kobj) {
                pr_warn("kobject_create_and_add opal failed\n");
                return -ENOMEM;
        }

        return 0;
}

static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
                               struct bin_attribute *bin_attr,
                               char *buf, loff_t off, size_t count)
{
        return memory_read_from_buffer(buf, count, &off, bin_attr->private,
                                       bin_attr->size);
}

static BIN_ATTR_RO(symbol_map, 0);

static void opal_export_symmap(void)
{
        const __be64 *syms;
        unsigned int size;
        struct device_node *fw;
        int rc;

        fw = of_find_node_by_path("/ibm,opal/firmware");
        if (!fw)
                return;
        syms = of_get_property(fw, "symbol-map", &size);
        if (!syms || size != 2 * sizeof(__be64))
                return;

        /* Setup attributes */
        bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
        bin_attr_symbol_map.size = be64_to_cpu(syms[1]);

        rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
        if (rc)
                pr_warn("Error %d creating OPAL symbols file\n", rc);
}

static void __init opal_dump_region_init(void)
{
        void *addr;
        uint64_t size;
        int rc;

        if (!opal_check_token(OPAL_REGISTER_DUMP_REGION))
                return;

        /* Register kernel log buffer */
        addr = log_buf_addr_get();
        if (addr == NULL)
                return;

        size = log_buf_len_get();
        if (size == 0)
                return;

        rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
                                       __pa(addr), size);
        /* Don't warn if this is just an older OPAL that doesn't
         * know about that call.
         */
        if (rc && rc != OPAL_UNSUPPORTED)
                pr_warn("DUMP: Failed to register kernel log buffer. "
                        "rc = %d\n", rc);
}

static void opal_pdev_init(struct device_node *opal_node,
                           const char *compatible)
{
        struct device_node *np;

        for_each_child_of_node(opal_node, np)
                if (of_device_is_compatible(np, compatible))
                        of_platform_device_create(np, NULL, NULL);
}

static void opal_i2c_create_devs(void)
{
        struct device_node *np;

        for_each_compatible_node(np, NULL, "ibm,opal-i2c")
                of_platform_device_create(np, NULL, NULL);
}

static int kopald(void *unused)
{
        __be64 events;

        set_freezable();
        do {
                try_to_freeze();
                opal_poll_events(&events);
                opal_handle_events(be64_to_cpu(events));
                msleep_interruptible(opal_heartbeat);
        } while (!kthread_should_stop());

        return 0;
}

static void opal_init_heartbeat(void)
{
        /* Old firmware, we assume the HVC heartbeat is sufficient */
        if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
                                 &opal_heartbeat) != 0)
                opal_heartbeat = 0;

        if (opal_heartbeat)
                kthread_run(kopald, NULL, "kopald");
}
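
/*
 * Illustrative note: the poll interval above comes from the device tree,
 * e.g. (hypothetical value, real firmware picks its own)
 *
 *	ibm,opal {
 *		ibm,heartbeat-ms = <2000>;
 *	};
 *
 * If the property is absent, opal_heartbeat stays 0 and no kopald thread
 * is started; the HVC console polling is assumed to be enough.
 */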

static int __init opal_init(void)
{
        struct device_node *np, *consoles;
        int rc;

        opal_node = of_find_node_by_path("/ibm,opal");
        if (!opal_node) {
                pr_warn("Device node not found\n");
                return -ENODEV;
        }

        /* Register OPAL consoles if any ports */
        if (firmware_has_feature(FW_FEATURE_OPALv2))
                consoles = of_find_node_by_path("/ibm,opal/consoles");
        else
                consoles = of_node_get(opal_node);
        if (consoles) {
                for_each_child_of_node(consoles, np) {
                        if (strcmp(np->name, "serial"))
                                continue;
                        of_platform_device_create(np, NULL, NULL);
                }
                of_node_put(consoles);
        }

        /* Initialise OPAL messaging system */
        opal_message_init();

        /* Initialise OPAL asynchronous completion interface */
        opal_async_comp_init();

        /* Initialise OPAL sensor interface */
        opal_sensor_init();

        /* Initialise OPAL hypervisor maintenance interrupt handling */
        opal_hmi_handler_init();

        /* Create i2c platform devices */
        opal_i2c_create_devs();

        /* Setup a heartbeat thread if requested by OPAL */
        opal_init_heartbeat();

        /* Create "opal" kobject under /sys/firmware */
        rc = opal_sysfs_init();
        if (rc == 0) {
                /* Export symbol map to userspace */
                opal_export_symmap();
                /* Setup dump region interface */
                opal_dump_region_init();
                /* Setup error log interface */
                rc = opal_elog_init();
                /* Setup code update interface */
                opal_flash_update_init();
                /* Setup platform dump extract interface */
                opal_platform_dump_init();
                /* Setup system parameters interface */
                opal_sys_param_init();
                /* Setup message log interface. */
                opal_msglog_init();
        }

        /* Initialize platform devices: IPMI backend, PRD & flash interface */
        opal_pdev_init(opal_node, "ibm,opal-ipmi");
        opal_pdev_init(opal_node, "ibm,opal-flash");
        opal_pdev_init(opal_node, "ibm,opal-prd");

        return 0;
}
machine_subsys_initcall(powernv, opal_init);

void opal_shutdown(void)
{
        long rc = OPAL_BUSY;

        opal_event_shutdown();

        /*
         * Then sync with OPAL, which ensures anything that can
         * potentially write to our memory has completed, such
         * as an ongoing dump retrieval.
         */
        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
                rc = opal_sync_host_reboot();
                if (rc == OPAL_BUSY)
                        opal_poll_events(NULL);
                else
                        mdelay(10);
        }

        /* Unregister memory dump region */
        if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION))
                opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}

/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);

EXPORT_SYMBOL_GPL(opal_xscom_read);
EXPORT_SYMBOL_GPL(opal_xscom_write);
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);
EXPORT_SYMBOL_GPL(opal_flash_read);
EXPORT_SYMBOL_GPL(opal_flash_write);
EXPORT_SYMBOL_GPL(opal_flash_erase);
EXPORT_SYMBOL_GPL(opal_prd_msg);

/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
                                             unsigned long vmalloc_size)
{
        struct opal_sg_list *sg, *first = NULL;
        unsigned long i = 0;

        sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!sg)
                goto nomem;

        first = sg;

        while (vmalloc_size > 0) {
                uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
                uint64_t length = min(vmalloc_size, PAGE_SIZE);

                sg->entry[i].data = cpu_to_be64(data);
                sg->entry[i].length = cpu_to_be64(length);
                i++;

                if (i >= SG_ENTRIES_PER_NODE) {
                        struct opal_sg_list *next;

                        next = kzalloc(PAGE_SIZE, GFP_KERNEL);
                        if (!next)
                                goto nomem;

                        sg->length = cpu_to_be64(
                                        i * sizeof(struct opal_sg_entry) + 16);
                        i = 0;
                        sg->next = cpu_to_be64(__pa(next));
                        sg = next;
                }

                vmalloc_addr += length;
                vmalloc_size -= length;
        }

        sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

        return first;

nomem:
        pr_err("%s : Failed to allocate memory\n", __func__);
        opal_free_sg_list(first);
        return NULL;
}

void opal_free_sg_list(struct opal_sg_list *sg)
{
        while (sg) {
                uint64_t next = be64_to_cpu(sg->next);

                kfree(sg);

                if (next)
                        sg = __va(next);
                else
                        sg = NULL;
        }
}
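
/*
 * Usage sketch for the two helpers above (illustrative; "image" and "len"
 * are hypothetical caller variables, and the firmware call shown is just
 * one possible consumer): build the scatter-gather list from a vmalloc'ed
 * buffer, hand its physical address to OPAL, then free the list when done.
 *
 *	struct opal_sg_list *list;
 *
 *	list = opal_vmalloc_to_sg_list(image, len);
 *	if (!list)
 *		return -ENOMEM;
 *	rc = opal_update_flash(__pa(list));
 *	...
 *	opal_free_sg_list(list);
 */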

int opal_error_code(int rc)
{
        switch (rc) {
        case OPAL_SUCCESS:              return 0;
        case OPAL_PARAMETER:            return -EINVAL;
        case OPAL_ASYNC_COMPLETION:     return -EINPROGRESS;
        case OPAL_BUSY_EVENT:           return -EBUSY;
        case OPAL_NO_MEM:               return -ENOMEM;
        case OPAL_PERMISSION:           return -EPERM;
        case OPAL_UNSUPPORTED:          return -EIO;
        case OPAL_HARDWARE:             return -EIO;
        case OPAL_INTERNAL_ERROR:       return -EIO;
        default:
                pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
                return -EIO;
        }
}

EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
EXPORT_SYMBOL_GPL(opal_i2c_request);