book3s_xive_template.c

/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

/* File to be included by other .c files */

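/*
 * XGLUE()/GLUE() paste tokens after macro expansion, so the X_PFX
 * prefix supplied by the including .c file ends up in the generated
 * function and statistic names.
 */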
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
#define XICS_DUMMY	1

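/*
 * Acknowledge the pending OS interrupt in the TIMA and fold the
 * returned priority into xc->pending and xc->hw_cppr.
 */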
static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
	u8 cppr;
	u16 ack;

	/*
	 * Ensure any previous store to CPPR is ordered vs.
	 * the subsequent loads from PIPR or ACK.
	 */
	eieio();

	/*
	 * DD1 bug workaround: If PIPR is less favored than CPPR,
	 * ignore the interrupt or we might incorrectly lose an IPB
	 * bit.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
		u8 pipr = be64_to_cpu(qw1) & 0xff;
		if (pipr >= xc->hw_cppr)
			return;
	}

	/* Perform the acknowledge OS to register cycle. */
	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/* XXX Check grouping level */

	/* Anything ? */
	if (!((ack >> 8) & TM_QW1_NSR_EO))
		return;

	/* Grab CPPR of the most favored pending interrupt */
	cppr = ack & 0xff;
	if (cppr < 8)
		xc->pending |= 1 << cppr;

#ifdef XIVE_RUNTIME_CHECKS
	/* Check consistency */
	if (cppr >= xc->hw_cppr)
		pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
			smp_processor_id(), cppr, xc->hw_cppr);
#endif

	/*
	 * Update our image of the HW CPPR. We don't yet modify
	 * xc->cppr, this will be done as we scan for interrupts
	 * in the queues.
	 */
	xc->hw_cppr = cppr;
}

static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__
	val >>= 64-8;
#endif
	return (u8)val;
}

static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		opal_int_eoi(hw_irq);
	} else {
		uint64_t eoi_val;

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 *
		 * For LSIs, using the HW EOI cycle works around a problem
		 * on P9 DD1 PHBs where the other ESB accesses don't work
		 * properly.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
		else {
			eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

			/* Re-trigger if needed */
			if ((eoi_val & 1) && __x_trig_page(xd))
				__x_writeq(0, __x_trig_page(xd));
		}
	}
}

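/*
 * Scan modes for scan_interrupts(): scan_fetch consumes the interrupt
 * and updates the queue pointers, scan_poll only peeks, and scan_eoi
 * re-evaluates the pending bits after an EOI without adjusting CPPR.
 */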
enum {
	scan_fetch,
	scan_poll,
	scan_eoi,
};

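/*
 * Scan the queues from the most favored pending priority down to the
 * current CPPR, arbitrating against a pending MFRR-based IPI, and
 * return the guest interrupt number found (0 if none).
 */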
static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
				       u8 pending, int scan_type)
{
	u32 hirq = 0;
	u8 prio = 0xff;

	/* Find highest pending priority */
	while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
		struct xive_q *q;
		u32 idx, toggle;
		__be32 *qpage;

		/*
		 * If pending is 0 this will return 0xff which is what
		 * we want
		 */
		prio = ffs(pending) - 1;

		/*
		 * If the most favored prio we found pending is less
		 * favored than (or equal to) a pending IPI, we return
		 * the IPI instead.
		 *
		 * Note: If pending was 0 and mfrr is 0xff, we will
		 * not spuriously take an IPI because mfrr cannot
		 * then be smaller than cppr.
		 */
		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
			prio = xc->mfrr;
			hirq = XICS_IPI;
			break;
		}

		/* Don't scan past the guest cppr */
		if (prio >= xc->cppr || prio > 7)
			break;

		/* Grab queue and pointers */
		q = &xc->queues[prio];
		idx = q->idx;
		toggle = q->toggle;

		/*
		 * Snapshot the queue page. The test further down for EOI
		 * must use the same "copy" that was used by __xive_read_eq
		 * since qpage can be set concurrently and we don't want
		 * to miss an EOI.
		 */
		qpage = READ_ONCE(q->qpage);

skip_ipi:
		/*
		 * Try to fetch from the queue. Will return 0 for a
		 * non-queueing priority (ie, qpage = 0).
		 */
		hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

		/*
		 * If this was a signal for an MFRR change done by
		 * H_IPI we skip it. Additionally, if we were fetching
		 * we EOI it now, thus re-enabling reception of a new
		 * such signal.
		 *
		 * We also need to do that if prio is 0 and we had no
		 * page for the queue. In this case, we have a non-queued
		 * IPI that needs to be EOId.
		 *
		 * This is safe because if we have another pending MFRR
		 * change that wasn't observed above, the Q bit will have
		 * been set and another occurrence of the IPI will trigger.
		 */
		if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
			if (scan_type == scan_fetch)
				GLUE(X_PFX,source_eoi)(xc->vp_ipi,
						       &xc->vp_ipi_data);
			/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
			WARN_ON(hirq && hirq != XICS_IPI);
#endif
			if (hirq)
				goto skip_ipi;
		}

		/* If it's the dummy interrupt, continue searching */
		if (hirq == XICS_DUMMY)
			goto skip_ipi;

		/* If fetching, update queue pointers */
		if (scan_type == scan_fetch) {
			q->idx = idx;
			q->toggle = toggle;
		}

		/* Something found, stop searching */
		if (hirq)
			break;

		/* Clear the pending bit on the now empty queue */
		pending &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away.
		 */
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);
			if (p) {
#ifdef XIVE_RUNTIME_CHECKS
				WARN_ON(p > atomic_read(&q->count));
#endif
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If we are just taking a "peek", do nothing else */
	if (scan_type == scan_poll)
		return hirq;

	/* Update the pending bits */
	xc->pending = pending;

	/*
	 * If this is an EOI that's it, no CPPR adjustment done here,
	 * all we needed was to clean up the stale pending bits and
	 * check if there's anything left.
	 */
	if (scan_type == scan_eoi)
		return hirq;

	/*
	 * If we found an interrupt, adjust what the guest CPPR should
	 * be as if we had just fetched that interrupt from HW.
	 *
	 * Note: This can only make xc->cppr smaller as the previous
	 * loop will only exit with hirq != 0 if prio is lower than
	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
	 * for pending IPIs.
	 */
	if (hirq)
		xc->cppr = prio;

	/*
	 * If it was an IPI the HW CPPR might have been lowered too much
	 * as the HW interrupt we use for IPIs is routed to priority 0.
	 *
	 * We re-sync it here.
	 */
	if (xc->cppr != xc->hw_cppr) {
		xc->hw_cppr = xc->cppr;
		__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
	}

	return hirq;
}

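/*
 * H_XIRR hcall: acknowledge the HW, scan for the next interrupt to
 * deliver and return it (with the previous CPPR in the top byte) in
 * GPR4.
 */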
X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 old_cppr;
	u32 hirq;

	pr_devel("H_XIRR\n");

	xc->GLUE(X_STAT_PFX,h_xirr)++;

	/* First collect pending bits from HW */
	GLUE(X_PFX,ack_pending)(xc);

	/*
	 * Cleanup the old-style bits if needed (they may have been
	 * set by a pull or an escalation interrupt).
	 */
	if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
		clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
			  &vcpu->arch.pending_exceptions);

	pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
		 xc->pending, xc->hw_cppr, xc->cppr);

	/* Grab previous CPPR and reverse map it */
	old_cppr = xive_prio_to_guest(xc->cppr);

	/* Scan for actual interrupts */
	hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

	pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
		 hirq, xc->hw_cppr, xc->cppr);

#ifdef XIVE_RUNTIME_CHECKS
	/* That should never hit */
	if (hirq & 0xff000000)
		pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif

	/*
	 * XXX We could check if the interrupt is masked here and
	 * filter it. If we chose to do so, we would need to do:
	 *
	 *    if (masked) {
	 *        lock();
	 *        if (masked) {
	 *            old_Q = true;
	 *            hirq = 0;
	 *        }
	 *        unlock();
	 *    }
	 */

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);

	return H_SUCCESS;
}

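/*
 * H_IPOLL hcall: non-destructive poll, returns the next interrupt for
 * the requested server (and that server's CPPR) in GPR4 without
 * consuming it.
 */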
X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 pending = xc->pending;
	u32 hirq;

	pr_devel("H_IPOLL(server=%ld)\n", server);

	xc->GLUE(X_STAT_PFX,h_ipoll)++;

	/* Grab the target VCPU if not the current one */
	if (xc->server_num != server) {
		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
		if (!vcpu)
			return H_PARAMETER;
		xc = vcpu->arch.xive_vcpu;

		/* Scan all priorities */
		pending = 0xff;
	} else {
		/* Grab pending interrupt if any */
		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
		u8 pipr = be64_to_cpu(qw1) & 0xff;
		if (pipr < 8)
			pending |= 1 << pipr;
	}

	hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);

	return H_SUCCESS;
}

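/*
 * Set the OS pending bit in the TIMA for the most favored pending
 * priority (folding in a pending MFRR-based IPI) so the HW will
 * re-notify the VP.
 */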
static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
	u8 pending, prio;

	pending = xc->pending;
	if (xc->mfrr != 0xff) {
		if (xc->mfrr < 8)
			pending |= 1 << xc->mfrr;
		else
			pending |= 0x80;
	}
	if (!pending)
		return;

	prio = ffs(pending) - 1;
	__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}

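/*
 * Walk the now-masked queues and replace any interrupt that has been
 * re-routed to another server with XICS_DUMMY, then force it to be
 * re-triggered on its new target.
 */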
static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
					       struct kvmppc_xive_vcpu *xc)
{
	unsigned int prio;

	/* For each priority that is now masked */
	for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];
		struct kvmppc_xive_irq_state *state;
		struct kvmppc_xive_src_block *sb;
		u32 idx, toggle, entry, irq, hw_num;
		struct xive_irq_data *xd;
		__be32 *qpage;
		u16 src;

		idx = q->idx;
		toggle = q->toggle;
		qpage = READ_ONCE(q->qpage);
		if (!qpage)
			continue;

		/* For each interrupt in the queue */
		for (;;) {
			entry = be32_to_cpup(qpage + idx);

			/* No more ? */
			if ((entry >> 31) == toggle)
				break;
			irq = entry & 0x7fffffff;

			/* Skip dummies and IPIs */
			if (irq == XICS_DUMMY || irq == XICS_IPI)
				goto next;

			sb = kvmppc_xive_find_source(xive, irq, &src);
			if (!sb)
				goto next;
			state = &sb->irq_state[src];

			/* Has it been rerouted ? */
			if (xc->server_num == state->act_server)
				goto next;

			/*
			 * All right, it *has* been re-routed, kill it from
			 * the queue.
			 */
			qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);

			/* Find the HW interrupt */
			kvmppc_xive_select_irq(state, &hw_num, &xd);

			/* If it's not an LSI, set PQ to 11; the EOI will force a resend */
			if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
				GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);

			/* EOI the source */
			GLUE(X_PFX,source_eoi)(hw_num, xd);

next:
			idx = (idx + 1) & q->msk;
			if (idx == 0)
				toggle ^= 1;
		}
	}
}

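/*
 * H_CPPR hcall: update the guest CPPR. When masking less, push any
 * newly deliverable interrupts to the HW; when masking more, evacuate
 * interrupts that have been re-routed away from this CPU.
 */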
X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 old_cppr;

	pr_devel("H_CPPR(cppr=%ld)\n", cppr);

	xc->GLUE(X_STAT_PFX,h_cppr)++;

	/* Map CPPR */
	cppr = xive_prio_from_guest(cppr);

	/* Remember old and update SW state */
	old_cppr = xc->cppr;
	xc->cppr = cppr;

	/*
	 * Order the above update of xc->cppr with the subsequent
	 * read of xc->mfrr inside push_pending_to_hw()
	 */
	smp_mb();

	if (cppr > old_cppr) {
		/*
		 * We are masking less, we need to look for pending things
		 * to deliver and set VP pending bits accordingly to trigger
		 * a new interrupt, otherwise we might miss MFRR changes for
		 * which we have optimized out sending an IPI signal.
		 */
		GLUE(X_PFX,push_pending_to_hw)(xc);
	} else {
		/*
		 * We are masking more, we need to check the queue for any
		 * interrupt that has been routed to another CPU, take
		 * it out (replace it with the dummy) and retrigger it.
		 *
		 * This is necessary since those interrupts may otherwise
		 * never be processed, at least not until this CPU restores
		 * its CPPR.
		 *
		 * This is in theory racy vs. HW adding new interrupts to
		 * the queue. In practice this works because the interesting
		 * cases are when the guest has done a set_xive() to move the
		 * interrupt away, which flushes the xive, followed by the
		 * target CPU doing a H_CPPR. So any new interrupt coming into
		 * the queue must still be routed to us and isn't a source
		 * of concern.
		 */
		GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
	}

	/* Apply new CPPR */
	xc->hw_cppr = cppr;
	__x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return H_SUCCESS;
}

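/*
 * H_EOI hcall: set the new CPPR from the XIRR, EOI the source (unless
 * it is the MFRR-synthesized IPI), then re-evaluate pending interrupts
 * and resync the HW CPPR.
 */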
X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_irq_data *xd;
	u8 new_cppr = xirr >> 24;
	u32 irq = xirr & 0x00ffffff, hw_num;
	u16 src;
	int rc = 0;

	pr_devel("H_EOI(xirr=%08lx)\n", xirr);

	xc->GLUE(X_STAT_PFX,h_eoi)++;

	xc->cppr = xive_prio_from_guest(new_cppr);

	/*
	 * IPIs are synthesized from MFRR and thus don't need
	 * any special EOI handling. The underlying interrupt
	 * used to signal MFRR changes is EOId when fetched from
	 * the queue.
	 */
	if (irq == XICS_IPI || irq == 0) {
		/*
		 * This barrier orders the setting of xc->cppr vs.
		 * the subsequent test of xc->mfrr done inside
		 * scan_interrupts and push_pending_to_hw
		 */
		smp_mb();
		goto bail;
	}

	/* Find interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel(" source not found !\n");
		rc = H_PARAMETER;
		/* Same as above */
		smp_mb();
		goto bail;
	}
	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	state->in_eoi = true;

	/*
	 * This barrier orders both the setting of in_eoi above vs.
	 * the subsequent test of guest_priority, and the setting
	 * of xc->cppr vs. the subsequent test of xc->mfrr done inside
	 * scan_interrupts and push_pending_to_hw
	 */
	smp_mb();

again:
	if (state->guest_priority == MASKED) {
		arch_spin_lock(&sb->lock);
		if (state->guest_priority != MASKED) {
			arch_spin_unlock(&sb->lock);
			goto again;
		}
		pr_devel(" EOI on saved P...\n");

		/* Clear old_p, that will cause unmask to perform an EOI */
		state->old_p = false;

		arch_spin_unlock(&sb->lock);
	} else {
		pr_devel(" EOI on source...\n");

		/* Perform EOI on the source */
		GLUE(X_PFX,source_eoi)(hw_num, xd);

		/* If it's an emulated LSI, check level and resend */
		if (state->lsi && state->asserted)
			__x_writeq(0, __x_trig_page(xd));
	}

	/*
	 * This barrier orders the above guest_priority check
	 * and spin_lock/unlock with clearing in_eoi below.
	 *
	 * It also has to be a full mb() as it must ensure
	 * the MMIOs done in source_eoi() are completed before
	 * state->in_eoi is visible.
	 */
	mb();
	state->in_eoi = false;

bail:
	/* Re-evaluate pending IRQs and update HW */
	GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
	GLUE(X_PFX,push_pending_to_hw)(xc);
	pr_devel(" after scan pending=%02x\n", xc->pending);

	/* Apply new CPPR */
	xc->hw_cppr = xc->cppr;
	__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return rc;
}

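/*
 * H_IPI hcall: store the new MFRR for the target server and trigger
 * its IPI if the MFRR is more favored than the target's CPPR.
 */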
X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			       unsigned long mfrr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

	xc->GLUE(X_STAT_PFX,h_ipi)++;

	/* Find target */
	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
	if (!vcpu)
		return H_PARAMETER;
	xc = vcpu->arch.xive_vcpu;

	/* Locklessly write over MFRR */
	xc->mfrr = mfrr;

	/*
	 * The load of xc->cppr below and the subsequent MMIO store
	 * to the IPI must happen after the above mfrr update is
	 * globally visible so that:
	 *
	 * - We synchronize with another CPU doing an H_EOI or a H_CPPR
	 *   updating xc->cppr then reading xc->mfrr.
	 *
	 * - The target of the IPI sees the xc->mfrr update.
	 */
	mb();

	/* Shoot the IPI if more favored than the target CPPR */
	if (mfrr < xc->cppr)
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

	return H_SUCCESS;
}