/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"

static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
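
/*
 * Query OPAL for the characteristics of a HW interrupt source and
 * fill in @data: flags, ESB geometry, source chip, and a mapping of
 * the EOI page (plus the trigger page when it is distinct).
 */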
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
		data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
	if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_MASK_FW;
	if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_EOI_FW;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	data->hw_irq = hw_irq;

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
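
/*
 * Configure a HW interrupt source's targeting (target, priority) and
 * remember the Linux irq number @sw_irq for it, retrying while OPAL
 * reports OPAL_BUSY.
 */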
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

/* This can be called multiple times to change a queue configuration */
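/*
 * An illustrative call sequence (not taken from this file): a caller
 * setting up a queue for one priority would typically allocate the
 * queue page first and then hand it over, roughly as in
 * xive_native_setup_queue() below, where vp_id, q, prio and
 * can_escalate are the caller's own bookkeeping:
 *
 *	qpage = xive_queue_page_alloc(cpu, xive_native_default_eq_shift());
 *	if (IS_ERR(qpage))
 *		return PTR_ERR(qpage);
 *	rc = xive_native_configure_queue(vp_id, q, prio, qpage,
 *					 xive_native_default_eq_shift(),
 *					 can_escalate);
 */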
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
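	/*
	 * @order is log2 of the queue size in bytes and each queue entry
	 * is 4 bytes, so the queue holds 1 << (order - 2) entries; the
	 * index mask is one less than that.
	 */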
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed ? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);
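
/*
 * Allocate and configure the per-CPU queue page for one priority; the
 * queue size (xive_queue_shift) was picked from the device tree at init.
 */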
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 irq;

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(xc->chip_id);
		if (irq == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */
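
/*
 * Allocate one HW interrupt source from any chip; returns 0 on
 * failure, which callers treat as "no irq".
 */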
u32 xive_native_alloc_irq(void)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (!xc->hw_ipi)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		xc->hw_ipi = 0;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
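/*
 * The ACK returns a 16-bit word: the low byte is the new CPPR and the
 * top two bits of the high byte (the NSR "HE" field) indicate which
 * context, if any, the acknowledged interrupt was targeting.
 */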
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = (ack >> 8) >> 6;
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI: /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}

static void xive_native_eoi(u32 hw_irq)
{
	/*
	 * Not normally used except if specific interrupts need
	 * a workaround on EOI.
	 */
	opal_int_eoi(hw_irq);
}
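
/*
 * Enable this CPU's pool VP and push it into the thread management
 * area; KVM relies on the pool VPs being active on each CPU.
 */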
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Check if pool VP already active, if it is, pull it */
	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

static const struct xive_ops xive_native_ops = {
	.populate_irq_data = xive_native_populate_irq_data,
	.configure_irq = xive_native_configure_irq,
	.setup_queue = xive_native_setup_queue,
	.cleanup_queue = xive_native_cleanup_queue,
	.match = xive_native_match,
	.shutdown = xive_native_shutdown,
	.update_pending = xive_native_update_pending,
	.eoi = xive_native_eoi,
	.setup_cpu = xive_native_setup_cpu,
	.teardown_cpu = xive_native_teardown_cpu,
	.sync_source = xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi = xive_native_get_ipi,
	.put_ipi = xive_native_put_ipi,
#endif /* CONFIG_SMP */
	.name = "native",
};
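
/*
 * Parse the optional provisioning properties. When firmware exposes
 * "ibm,xive-provision-page-size", OPAL may later ask us to donate
 * pages of that size when allocating VP blocks, so prepare a kmem
 * cache and the list of chips that need donations.
 */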
static bool xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kcalloc(4, xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
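
/*
 * Probe the "ibm,opal-xive-pe" node, map the thread management area,
 * pick an EQ size, then switch the controller from emulation to
 * exploitation mode and register this backend with the XIVE core.
 */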
bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation */
	if (of_get_property(np, "single-escalation-support", NULL) != NULL)
		xive_has_single_esc = true;

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}

static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node
		 * where the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}
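
/*
 * Allocate a block of VPs; the block size is max_vcpus rounded up to
 * the next power of two. OPAL may answer OPAL_XIVE_PROVISIONING, in
 * which case we donate the pages it needs and retry.
 */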
u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(OPAL_BUSY_DELAY_MS);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
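
/*
 * Enable a VP. With single_escalation set (only meaningful when
 * firmware advertises the capability, see
 * xive_native_has_single_escalation()), the VP's queues share a
 * single escalation interrupt rather than one per queue.
 */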
int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc)
		return -EIO;
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);