native.c

/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"
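
/*
 * Native backend for the XIVE interrupt controller: drives the hardware
 * directly through OPAL calls on bare-metal (PowerNV) systems.
 */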
static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
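
/*
 * Populate a xive_irq_data structure from the OPAL view of a hardware
 * interrupt (flags, source chip, ESB page addresses and geometry) and
 * map its ESB pages.
 */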
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
		data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
	if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_MASK_FW;
	if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_EOI_FW;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
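
/*
 * OPAL may return OPAL_BUSY while a previous update is still in flight.
 * The configuration helpers below simply retry with a 1ms sleep until
 * the call either succeeds or fails for real.
 */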
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
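	/*
	 * Queue entries are 4 bytes, so a queue of (1 << order) bytes
	 * holds (1 << (order - 2)) entries; msk is the index wrap mask.
	 */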
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed ? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc) {
		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc)
		pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);
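
/*
 * Per-CPU queue management: allocate an event queue page for a given
 * priority and register it with OPAL, or tear it down again when the
 * CPU goes away.
 */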
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	struct device_node *np;
	unsigned int chip_id = 0;
	s64 irq;

	/* Find the chip ID (default to chip 0 if we can't find one) */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		if (of_property_read_u32(np, "ibm,chip-id", &chip_id) < 0)
			chip_id = 0;
	}

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(chip_id);
		if (irq == OPAL_BUSY) {
			msleep(1);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */
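
/*
 * Allocate a generic hardware interrupt from OPAL on any chip. Returns
 * the HW irq number, or 0 on failure.
 */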
u32 xive_native_alloc_irq(void)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (!xc->hw_ipi)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(1);
			continue;
		}
		xc->hw_ipi = 0;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = GETFIELD(TM_QW3_NSR_HE, (ack >> 8));
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}

static void xive_native_eoi(u32 hw_irq)
{
	/*
	 * Not normally used except if specific interrupts need
	 * a workaround on EOI.
	 */
	opal_int_eoi(hw_irq);
}
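
/*
 * Enable the per-CPU pool VP and push it into the thread management
 * area. The pool VPs are allocated at boot (see xive_native_setup_pools())
 * and are required for KVM operation.
 */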
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);
	pr_debug("VP CAM = %llx\n", vp_cam);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	pr_debug("(Old HW value: %08x)\n",
		 in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2,
		 TM_QW2W2_VP | vp_cam);
	pr_debug("(New HW value: %08x)\n",
		 in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

static const struct xive_ops xive_native_ops = {
	.populate_irq_data	= xive_native_populate_irq_data,
	.configure_irq		= xive_native_configure_irq,
	.setup_queue		= xive_native_setup_queue,
	.cleanup_queue		= xive_native_cleanup_queue,
	.match			= xive_native_match,
	.shutdown		= xive_native_shutdown,
	.update_pending		= xive_native_update_pending,
	.eoi			= xive_native_eoi,
	.setup_cpu		= xive_native_setup_cpu,
	.teardown_cpu		= xive_native_teardown_cpu,
	.sync_source		= xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi		= xive_native_get_ipi,
	.put_ipi		= xive_native_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "native",
};
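
/*
 * When OPAL runs out of internal memory for VP blocks it returns
 * OPAL_XIVE_PROVISIONING and expects the kernel to donate pages of the
 * advertised size on the listed chips; this parses the device-tree
 * properties describing that scheme (see xive_native_provision_pages()).
 */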
static bool xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kzalloc(4 * xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
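
/*
 * Probe the native XIVE controller from the device tree, map the thread
 * management area, pick an event queue size and switch the hardware from
 * emulation to exploitation mode. Returns false if XIVE is disabled on
 * the command line or unavailable.
 */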
bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation? */
	if (of_get_property(np, "single-escalation-support", NULL) != NULL)
		xive_has_single_esc = true;

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}
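
/*
 * Donate one page per listed chip to OPAL so that it can complete a
 * pending VP block allocation (see the OPAL_XIVE_PROVISIONING handling
 * in xive_native_alloc_vp_block() below).
 */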
static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node where
		 * the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;
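
	/*
	 * VP blocks come in power-of-two sizes, so round the requested
	 * count up to the next power of two: for example, max_vcpus = 6
	 * gives fls(6) - 1 = 2, then 6 > (1 << 2) bumps the order to 3,
	 * i.e. a block of 8 VPs.
	 */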
	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(1);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);
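
/*
 * VP blocks allocated above are handed out to users such as KVM; a
 * block is returned to OPAL as a whole via its base VP number.
 */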
void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
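
/*
 * With single escalation, a VP uses one escalation interrupt for all of
 * its queues rather than one per queue (see xive_has_single_esc, set
 * from the "single-escalation-support" device-tree property above).
 */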
int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc)
		return -EIO;
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);