book3s_xics.c

/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>

#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a mutex protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq);

/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
        struct ics_irq_state *state;
        struct kvmppc_ics *ics;
        u16 src;

        XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics) {
                XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
                return -EINVAL;
        }
        state = &ics->irq_state[src];
        if (!state->exists)
                return -EINVAL;

        /*
         * We set state->asserted locklessly. This should be fine as
         * we are the only setter, thus concurrent access is undefined
         * to begin with.
         */
        if (level == 1 || level == KVM_INTERRUPT_SET_LEVEL)
                state->asserted = 1;
        else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
                state->asserted = 0;
                return 0;
        }

        /* Attempt delivery */
        icp_deliver_irq(xics, NULL, irq);

        return 0;
}
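
/*
 * Walk every source in this ICS and retry delivery of those that
 * are flagged for resend.
 */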
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
                             struct kvmppc_icp *icp)
{
        int i;

        mutex_lock(&ics->lock);

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                struct ics_irq_state *state = &ics->irq_state[i];

                if (!state->resend)
                        continue;

                XICS_DBG("resend %#x prio %#x\n", state->number,
                         state->priority);

                mutex_unlock(&ics->lock);
                icp_deliver_irq(xics, icp, state->number);
                mutex_lock(&ics->lock);
        }

        mutex_unlock(&ics->lock);
}
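
/*
 * Update server/priority for one source under the ICS mutex and tell
 * the caller whether a delivery attempt should follow (i.e. the source
 * was pending or flagged for resend and is no longer masked).
 */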
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
                       struct ics_irq_state *state,
                       u32 server, u32 priority, u32 saved_priority)
{
        bool deliver;

        mutex_lock(&ics->lock);

        state->server = server;
        state->priority = priority;
        state->saved_priority = saved_priority;
        deliver = false;
        if ((state->masked_pending || state->resend) && priority != MASKED) {
                state->masked_pending = 0;
                deliver = true;
        }

        mutex_unlock(&ics->lock);

        return deliver;
}
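
/*
 * Set the server and priority of an interrupt source, attempting
 * delivery if the source became deliverable.
 */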
int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_icp *icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        icp = kvmppc_xics_find_server(kvm, server);
        if (!icp)
                return -EINVAL;

        XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
                 irq, server, priority,
                 state->masked_pending, state->resend);

        if (write_xive(xics, ics, state, server, priority, priority))
                icp_deliver_irq(xics, icp, irq);

        return 0;
}
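
/* Read back the current server and priority of an interrupt source. */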
int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        mutex_lock(&ics->lock);
        *server = state->server;
        *priority = state->priority;
        mutex_unlock(&ics->lock);

        return 0;
}
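
/*
 * Re-enable an interrupt source by restoring its saved priority,
 * attempting delivery if it became deliverable.
 */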
int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_icp *icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        icp = kvmppc_xics_find_server(kvm, state->server);
        if (!icp)
                return -EINVAL;

        if (write_xive(xics, ics, state, state->server, state->saved_priority,
                       state->saved_priority))
                icp_deliver_irq(xics, icp, irq);

        return 0;
}
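
/* Mask an interrupt source while remembering its current priority. */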
int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        write_xive(xics, ics, state, state->server, MASKED, state->priority);

        return 0;
}

/* -- ICP routines, including hcalls -- */
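
/*
 * Try to commit a new ICP state with a single cmpxchg and, on success,
 * raise the external interrupt on the target vcpu if the new state
 * calls for it.
 */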
static inline bool icp_try_update(struct kvmppc_icp *icp,
                                  union kvmppc_icp_state old,
                                  union kvmppc_icp_state new,
                                  bool change_self)
{
        bool success;

        /* Calculate new output value */
        new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

        /* Attempt atomic update */
        success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
        if (!success)
                goto bail;

        XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
                 icp->server_num,
                 old.cppr, old.mfrr, old.pending_pri, old.xisr,
                 old.need_resend, old.out_ee);
        XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
                 new.cppr, new.mfrr, new.pending_pri, new.xisr,
                 new.need_resend, new.out_ee);

        /*
         * Check for output state update
         *
         * Note that this is racy since another processor could be updating
         * the state already. This is why we never clear the interrupt output
         * here, we only ever set it. The clear only happens prior to doing
         * an update and only by the processor itself. Currently we do it
         * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
         *
         * We also do not try to figure out whether the EE state has changed,
         * we unconditionally set it if the new state calls for it. The reason
         * for that is that we opportunistically remove the pending interrupt
         * flag when raising CPPR, so we need to set it back here if an
         * interrupt is still pending.
         */
        if (new.out_ee) {
                kvmppc_book3s_queue_irqprio(icp->vcpu,
                                            BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
                if (!change_self)
                        kvmppc_fast_vcpu_kick(icp->vcpu);
        }
 bail:
        return success;
}
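
/*
 * Ask every ICS flagged in this ICP's resend map to retry delivery
 * of its pending sources.
 */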
static void icp_check_resend(struct kvmppc_xics *xics,
                             struct kvmppc_icp *icp)
{
        u32 icsid;

        /* Order this load with the test for need_resend in the caller */
        smp_rmb();
        for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
                struct kvmppc_ics *ics = xics->ics[icsid];

                if (!test_and_clear_bit(icsid, icp->resend_map))
                        continue;
                if (!ics)
                        continue;
                ics_check_resend(xics, ics, icp);
        }
}
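
/*
 * Attempt to present an interrupt to the ICP. On success, any interrupt
 * that was previously pending is returned in *reject so the caller can
 * re-deliver it; on failure the ICP's need_resend flag is set.
 */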
static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
                               u32 *reject)
{
        union kvmppc_icp_state old_state, new_state;
        bool success;

        XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
                 icp->server_num);

        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                *reject = 0;

                /* See if we can deliver */
                success = new_state.cppr > priority &&
                          new_state.mfrr > priority &&
                          new_state.pending_pri > priority;

                /*
                 * If we can, check for a rejection and perform the
                 * delivery
                 */
                if (success) {
                        *reject = new_state.xisr;
                        new_state.xisr = irq;
                        new_state.pending_pri = priority;
                } else {
                        /*
                         * If we failed to deliver we set need_resend
                         * so a subsequent CPPR state change causes us
                         * to try a new delivery.
                         */
                        new_state.need_resend = true;
                }

        } while (!icp_try_update(icp, old_state, new_state, false));

        return success;
}
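
/* Deliver (or re-deliver) an interrupt from an ICS to its target ICP. */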
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq)
{
        struct ics_irq_state *state;
        struct kvmppc_ics *ics;
        u32 reject;
        u16 src;

        /*
         * This is used both for initial delivery of an interrupt and
         * for subsequent rejection.
         *
         * Rejection can be racy vs. resends. We have evaluated the
         * rejection in an atomic ICP transaction which is now complete,
         * so potentially the ICP can already accept the interrupt again.
         *
         * So we need to retry the delivery. Essentially the reject path
         * boils down to a failed delivery. Always.
         *
         * Now the interrupt could also have moved to a different target,
         * thus we may need to re-do the ICP lookup as well
         */
 again:
        /* Get the ICS state and lock it */
        ics = kvmppc_xics_find_ics(xics, new_irq, &src);
        if (!ics) {
                XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
                return;
        }
        state = &ics->irq_state[src];

        /* Get a lock on the ICS */
        mutex_lock(&ics->lock);

        /* Get our server */
        if (!icp || state->server != icp->server_num) {
                icp = kvmppc_xics_find_server(xics->kvm, state->server);
                if (!icp) {
                        pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
                                new_irq, state->server);
                        goto out;
                }
        }

        /* Clear the resend bit of that interrupt */
        state->resend = 0;

        /*
         * If masked, bail out
         *
         * Note: PAPR doesn't mention anything about masked pending
         * when doing a resend, only when doing a delivery.
         *
         * However that would have the effect of losing a masked
         * interrupt that was rejected and isn't consistent with
         * the whole masked_pending business which is about not
         * losing interrupts that occur while masked.
         *
         * I don't differentiate normal deliveries and resends, this
         * implementation will differ from PAPR and not lose such
         * interrupts.
         */
        if (state->priority == MASKED) {
                XICS_DBG("irq %#x masked pending\n", new_irq);
                state->masked_pending = 1;
                goto out;
        }

        /*
         * Try the delivery, this will set the need_resend flag
         * in the ICP as part of the atomic transaction if the
         * delivery is not possible.
         *
         * Note that if successful, the new delivery might have itself
         * rejected an interrupt that was "delivered" before we took the
         * icp mutex.
         *
         * In this case we do the whole sequence all over again for the
         * new guy. We cannot assume that the rejected interrupt is less
         * favored than the new one, and thus doesn't need to be delivered,
         * because by the time we exit icp_try_to_deliver() the target
         * processor may well have already consumed & completed it, and thus
         * the rejected interrupt might actually be already acceptable.
         */
        if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
                /*
                 * Delivery was successful, did we reject somebody else ?
                 */
                if (reject && reject != XICS_IPI) {
                        mutex_unlock(&ics->lock);
                        new_irq = reject;
                        goto again;
                }
        } else {
                /*
                 * We failed to deliver the interrupt, so we need to set the
                 * resend map bit and mark the ICS state as needing a resend.
                 */
                set_bit(ics->icsid, icp->resend_map);
                state->resend = 1;

                /*
                 * If the need_resend flag got cleared in the ICP some time
                 * between icp_try_to_deliver() atomic update and now, then
                 * we know it might have missed the resend_map bit. So we
                 * retry
                 */
                smp_mb();
                if (!icp->state.need_resend) {
                        mutex_unlock(&ics->lock);
                        goto again;
                }
        }
 out:
        mutex_unlock(&ics->lock);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                          u8 new_cppr)
{
        union kvmppc_icp_state old_state, new_state;
        bool resend;

        /*
         * This handles several related states in one operation:
         *
         * ICP State: Down_CPPR
         *
         * Load CPPR with new value and if the XISR is 0
         * then check for resends:
         *
         * ICP State: Resend
         *
         * If MFRR is more favored than CPPR, check for IPIs
         * and notify ICS of a potential resend. This is done
         * asynchronously (when used in real mode, we will have
         * to exit here).
         *
         * We do not handle the complete Check_IPI as documented
         * here. In the PAPR, this state will be used for both
         * Set_MFRR and Down_CPPR. However, we know that we aren't
         * changing the MFRR state here so we don't need to handle
         * the case of an MFRR causing a reject of a pending irq,
         * this will have been handled when the MFRR was set in the
         * first place.
         *
         * Thus we don't have to handle rejects, only resends.
         *
         * When implementing real mode for HV KVM, resend will lead to
         * a H_TOO_HARD return and the whole transaction will be handled
         * in virtual mode.
         */
        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                /* Down_CPPR */
                new_state.cppr = new_cppr;

                /*
                 * Cut down Resend / Check_IPI / IPI
                 *
                 * The logic is that we cannot have a pending interrupt
                 * trumped by an IPI at this point (see above), so we
                 * know that either the pending interrupt is already an
                 * IPI (in which case we don't care to override it) or
                 * it's either more favored than us or non-existent
                 */
                if (new_state.mfrr < new_cppr &&
                    new_state.mfrr <= new_state.pending_pri) {
                        WARN_ON(new_state.xisr != XICS_IPI &&
                                new_state.xisr != 0);
                        new_state.pending_pri = new_state.mfrr;
                        new_state.xisr = XICS_IPI;
                }

                /* Latch/clear resend bit */
                resend = new_state.need_resend;
                new_state.need_resend = 0;

        } while (!icp_try_update(icp, old_state, new_state, true));

        /*
         * Now handle resend checks. Those are asynchronous to the ICP
         * state update in HW (ie bus transactions) so we can handle them
         * separately here too
         */
        if (resend)
                icp_check_resend(xics, icp);
}
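
/* H_XIRR: accept the most favored pending interrupt, if any. */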
static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 xirr;

        /* First, remove EE from the processor */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        /*
         * ICP State: Accept_Interrupt
         *
         * Return the pending interrupt (if any) along with the
         * current CPPR, then clear the XISR & set CPPR to the
         * pending priority
         */
        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
                if (!old_state.xisr)
                        break;
                new_state.cppr = new_state.pending_pri;
                new_state.pending_pri = 0xff;
                new_state.xisr = 0;

        } while (!icp_try_update(icp, old_state, new_state, true));

        XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

        return xirr;
}
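
/*
 * H_IPI: set the MFRR of the target server, rejecting or resending
 * interrupts as required by the new value.
 */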
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                                 unsigned long mfrr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp;
        u32 reject;
        bool resend;
        bool local;

        XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
                 vcpu->vcpu_id, server, mfrr);

        icp = vcpu->arch.icp;
        local = icp->server_num == server;
        if (!local) {
                icp = kvmppc_xics_find_server(vcpu->kvm, server);
                if (!icp)
                        return H_PARAMETER;
        }

        /*
         * ICP state: Set_MFRR
         *
         * If the CPPR is more favored than the new MFRR, then
         * nothing needs to be rejected as there can be no XISR to
         * reject. If the MFRR is being made less favored then
         * there might be a previously-rejected interrupt needing
         * to be resent.
         *
         * If the CPPR is less favored, then we might be replacing
         * an interrupt, and thus need to possibly reject it as in
         *
         * ICP state: Check_IPI
         */
        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                /* Set_MFRR */
                new_state.mfrr = mfrr;

                /* Check_IPI */
                reject = 0;
                resend = false;
                if (mfrr < new_state.cppr) {
                        /* Reject a pending interrupt if not an IPI */
                        if (mfrr <= new_state.pending_pri)
                                reject = new_state.xisr;
                        new_state.pending_pri = mfrr;
                        new_state.xisr = XICS_IPI;
                }

                if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
                        resend = new_state.need_resend;
                        new_state.need_resend = 0;
                }
        } while (!icp_try_update(icp, old_state, new_state, local));

        /* Handle reject */
        if (reject && reject != XICS_IPI)
                icp_deliver_irq(xics, icp, reject);

        /* Handle resend */
        if (resend)
                icp_check_resend(xics, icp);

        return H_SUCCESS;
}
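
/* H_IPOLL: return a server's pending XIRR and MFRR without accepting. */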
static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
        union kvmppc_icp_state state;
        struct kvmppc_icp *icp;

        icp = vcpu->arch.icp;
        if (icp->server_num != server) {
                icp = kvmppc_xics_find_server(vcpu->kvm, server);
                if (!icp)
                        return H_PARAMETER;
        }
        state = ACCESS_ONCE(icp->state);
        kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
        kvmppc_set_gpr(vcpu, 5, state.mfrr);
        return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 reject;

        XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

        /*
         * ICP State: Set_CPPR
         *
         * We can safely compare the new value with the current
         * value outside of the transaction as the CPPR is only
         * ever changed by the processor on itself
         */
        if (cppr > icp->state.cppr)
                icp_down_cppr(xics, icp, cppr);
        else if (cppr == icp->state.cppr)
                return;

        /*
         * ICP State: Up_CPPR
         *
         * The processor is raising its priority, this can result
         * in a rejection of a pending interrupt:
         *
         * ICP State: Reject_Current
         *
         * We can remove EE from the current processor, the update
         * transaction will set it again if needed
         */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                reject = 0;
                new_state.cppr = cppr;

                if (cppr <= new_state.pending_pri) {
                        reject = new_state.xisr;
                        new_state.xisr = 0;
                        new_state.pending_pri = 0xff;
                }

        } while (!icp_try_update(icp, old_state, new_state, true));

        /*
         * Check for rejects. They are handled by doing a new delivery
         * attempt (see comments in icp_deliver_irq).
         */
        if (reject && reject != XICS_IPI)
                icp_deliver_irq(xics, icp, reject);
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u32 irq = xirr & 0x00ffffff;
        u16 src;

        XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

        /*
         * ICP State: EOI
         *
         * Note: If EOI is incorrectly used by SW to lower the CPPR
         * value (ie more favored), we do not check for rejection of
         * a pending interrupt, this is a SW error and PAPR specifies
         * that we don't have to deal with it.
         *
         * The sending of an EOI to the ICS is handled after the
         * CPPR update
         *
         * ICP State: Down_CPPR which we handle
         * in a separate function as it's shared with H_CPPR.
         */
        icp_down_cppr(xics, icp, xirr >> 24);

        /* IPIs have no EOI */
        if (irq == XICS_IPI)
                return H_SUCCESS;

        /*
         * EOI handling: If the interrupt is still asserted, we need to
         * resend it. We can take a lockless "peek" at the ICS state here.
         *
         * "Message" interrupts will never have "asserted" set
         */
        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics) {
                XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
                return H_PARAMETER;
        }
        state = &ics->irq_state[src];

        /* Still asserted, resend it */
        if (state->asserted)
                icp_deliver_irq(xics, icp, irq);

        kvm_notify_acked_irq(vcpu->kvm, 0, irq);

        return H_SUCCESS;
}
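
/*
 * Complete, in virtual mode, work the real-mode ICP handlers could not
 * finish: vcpu kicks, resend checks, rejects and EOI notifications.
 */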
static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;

        XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
                 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

        if (icp->rm_action & XICS_RM_KICK_VCPU)
                kvmppc_fast_vcpu_kick(icp->rm_kick_target);
        if (icp->rm_action & XICS_RM_CHECK_RESEND)
                icp_check_resend(xics, icp);
        if (icp->rm_action & XICS_RM_REJECT)
                icp_deliver_irq(xics, icp, icp->rm_reject);
        if (icp->rm_action & XICS_RM_NOTIFY_EOI)
                kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);

        icp->rm_action = 0;

        return H_SUCCESS;
}

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        unsigned long res;
        int rc = H_SUCCESS;

        /* Check if we have an ICP */
        if (!xics || !vcpu->arch.icp)
                return H_HARDWARE;

        /* These requests don't have real-mode implementations at present */
        switch (req) {
        case H_XIRR_X:
                res = kvmppc_h_xirr(vcpu);
                kvmppc_set_gpr(vcpu, 4, res);
                kvmppc_set_gpr(vcpu, 5, get_tb());
                return rc;
        case H_IPOLL:
                rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
                return rc;
        }

        /* Check for real mode returning too hard */
        if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
                return kvmppc_xics_rm_complete(vcpu, req);

        switch (req) {
        case H_XIRR:
                res = kvmppc_h_xirr(vcpu);
                kvmppc_set_gpr(vcpu, 4, res);
                break;
        case H_CPPR:
                kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
                break;
        case H_EOI:
                rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
                break;
        case H_IPI:
                rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
                                  kvmppc_get_gpr(vcpu, 5));
                break;
        }

        return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);


/* -- Initialisation code etc. -- */

static int xics_debug_show(struct seq_file *m, void *private)
{
        struct kvmppc_xics *xics = m->private;
        struct kvm *kvm = xics->kvm;
        struct kvm_vcpu *vcpu;
        int icsid, i;

        if (!kvm)
                return 0;

        seq_printf(m, "=========\nICP state\n=========\n");

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvmppc_icp *icp = vcpu->arch.icp;
                union kvmppc_icp_state state;

                if (!icp)
                        continue;

                state.raw = ACCESS_ONCE(icp->state.raw);
                seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
                           icp->server_num, state.xisr,
                           state.pending_pri, state.cppr, state.mfrr,
                           state.out_ee, state.need_resend);
        }

        for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
                struct kvmppc_ics *ics = xics->ics[icsid];

                if (!ics)
                        continue;

                seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
                           icsid);

                mutex_lock(&ics->lock);

                for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                        struct ics_irq_state *irq = &ics->irq_state[i];

                        seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
                                   irq->number, irq->server, irq->priority,
                                   irq->saved_priority, irq->asserted,
                                   irq->resend, irq->masked_pending);
                }
                mutex_unlock(&ics->lock);
        }
        return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
        .open = xics_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
        char *name;

        name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
        if (!name) {
                pr_err("%s: no memory for name\n", __func__);
                return;
        }

        xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
                                           xics, &xics_debug_fops);

        pr_debug("%s: created %s\n", __func__, name);
        kfree(name);
}
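
/* Allocate and install the ICS covering the given interrupt number. */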
static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
                                        struct kvmppc_xics *xics, int irq)
{
        struct kvmppc_ics *ics;
        int i, icsid;

        icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

        mutex_lock(&kvm->lock);

        /* ICS already exists - somebody else got here first */
        if (xics->ics[icsid])
                goto out;

        /* Create the ICS */
        ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
        if (!ics)
                goto out;

        mutex_init(&ics->lock);
        ics->icsid = icsid;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
                ics->irq_state[i].priority = MASKED;
                ics->irq_state[i].saved_priority = MASKED;
        }
        smp_wmb();
        xics->ics[icsid] = ics;

        if (icsid > xics->max_icsid)
                xics->max_icsid = icsid;

 out:
        mutex_unlock(&kvm->lock);
        return xics->ics[icsid];
}
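
/* Allocate and initialise the ICP for a vcpu and its server number. */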
int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
        struct kvmppc_icp *icp;

        if (!vcpu->kvm->arch.xics)
                return -ENODEV;

        if (kvmppc_xics_find_server(vcpu->kvm, server_num))
                return -EEXIST;

        icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
        if (!icp)
                return -ENOMEM;

        icp->vcpu = vcpu;
        icp->server_num = server_num;
        icp->state.mfrr = MASKED;
        icp->state.pending_pri = MASKED;
        vcpu->arch.icp = icp;

        XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

        return 0;
}
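
/* Pack the ICP state into the 64-bit KVM_REG_PPC_ICP_* format for userspace. */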
u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
        struct kvmppc_icp *icp = vcpu->arch.icp;
        union kvmppc_icp_state state;

        if (!icp)
                return 0;
        state = icp->state;
        return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
                ((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
                ((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
                ((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
        struct kvmppc_icp *icp = vcpu->arch.icp;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_ics *ics;
        u8 cppr, mfrr, pending_pri;
        u32 xisr;
        u16 src;
        bool resend;

        if (!icp || !xics)
                return -ENOENT;

        cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
        xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
                KVM_REG_PPC_ICP_XISR_MASK;
        mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
        pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

        /* Require the new state to be internally consistent */
        if (xisr == 0) {
                if (pending_pri != 0xff)
                        return -EINVAL;
        } else if (xisr == XICS_IPI) {
                if (pending_pri != mfrr || pending_pri >= cppr)
                        return -EINVAL;
        } else {
                if (pending_pri >= mfrr || pending_pri >= cppr)
                        return -EINVAL;
                ics = kvmppc_xics_find_ics(xics, xisr, &src);
                if (!ics)
                        return -EINVAL;
        }

        new_state.raw = 0;
        new_state.cppr = cppr;
        new_state.xisr = xisr;
        new_state.mfrr = mfrr;
        new_state.pending_pri = pending_pri;

        /*
         * Deassert the CPU interrupt request.
         * icp_try_update will reassert it if necessary.
         */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        /*
         * Note that if we displace an interrupt from old_state.xisr,
         * we don't mark it as rejected. We expect userspace to set
         * the state of the interrupt sources to be consistent with
         * the ICP states (either before or afterwards, which doesn't
         * matter). We do handle resends due to CPPR becoming less
         * favoured because that is necessary to end up with a
         * consistent state in the situation where userspace restores
         * the ICS states before the ICP states.
         */
        do {
                old_state = ACCESS_ONCE(icp->state);

                if (new_state.mfrr <= old_state.mfrr) {
                        resend = false;
                        new_state.need_resend = old_state.need_resend;
                } else {
                        resend = old_state.need_resend;
                        new_state.need_resend = 0;
                }
        } while (!icp_try_update(icp, old_state, new_state, false));

        if (resend)
                icp_check_resend(xics, icp);

        return 0;
}
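
/*
 * Read out one interrupt source's state for the KVM_DEV_XICS_GRP_SOURCES
 * device attribute.
 */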
static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
        int ret;
        struct kvmppc_ics *ics;
        struct ics_irq_state *irqp;
        u64 __user *ubufp = (u64 __user *) addr;
        u16 idx;
        u64 val, prio;

        ics = kvmppc_xics_find_ics(xics, irq, &idx);
        if (!ics)
                return -ENOENT;

        irqp = &ics->irq_state[idx];
        mutex_lock(&ics->lock);
        ret = -ENOENT;
        if (irqp->exists) {
                val = irqp->server;
                prio = irqp->priority;
                if (prio == MASKED) {
                        val |= KVM_XICS_MASKED;
                        prio = irqp->saved_priority;
                }
                val |= prio << KVM_XICS_PRIORITY_SHIFT;
                if (irqp->asserted)
                        val |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
                else if (irqp->masked_pending || irqp->resend)
                        val |= KVM_XICS_PENDING;
                ret = 0;
        }
        mutex_unlock(&ics->lock);

        if (!ret && put_user(val, ubufp))
                ret = -EFAULT;

        return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
        struct kvmppc_ics *ics;
        struct ics_irq_state *irqp;
        u64 __user *ubufp = (u64 __user *) addr;
        u16 idx;
        u64 val;
        u8 prio;
        u32 server;

        if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
                return -ENOENT;

        ics = kvmppc_xics_find_ics(xics, irq, &idx);
        if (!ics) {
                ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
                if (!ics)
                        return -ENOMEM;
        }
        irqp = &ics->irq_state[idx];
        if (get_user(val, ubufp))
                return -EFAULT;

        server = val & KVM_XICS_DESTINATION_MASK;
        prio = val >> KVM_XICS_PRIORITY_SHIFT;
        if (prio != MASKED &&
            kvmppc_xics_find_server(xics->kvm, server) == NULL)
                return -EINVAL;

        mutex_lock(&ics->lock);
        irqp->server = server;
        irqp->saved_priority = prio;
        if (val & KVM_XICS_MASKED)
                prio = MASKED;
        irqp->priority = prio;
        irqp->resend = 0;
        irqp->masked_pending = 0;
        irqp->asserted = 0;
        if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
                irqp->asserted = 1;
        irqp->exists = 1;
        mutex_unlock(&ics->lock);

        if (val & KVM_XICS_PENDING)
                icp_deliver_irq(xics, NULL, irqp->number);

        return 0;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status)
{
        struct kvmppc_xics *xics = kvm->arch.xics;

        return ics_deliver_irq(xics, irq, level);
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status)
{
        if (!level)
                return -1;
        return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
                           level, line_status);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        struct kvmppc_xics *xics = dev->private;

        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                return xics_set_source(xics, attr->attr, attr->addr);
        }
        return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        struct kvmppc_xics *xics = dev->private;

        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                return xics_get_source(xics, attr->attr, attr->addr);
        }
        return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
                    attr->attr < KVMPPC_XICS_NR_IRQS)
                        return 0;
                break;
        }
        return -ENXIO;
}

static void kvmppc_xics_free(struct kvm_device *dev)
{
        struct kvmppc_xics *xics = dev->private;
        int i;
        struct kvm *kvm = xics->kvm;

        debugfs_remove(xics->dentry);

        if (kvm)
                kvm->arch.xics = NULL;

        for (i = 0; i <= xics->max_icsid; i++)
                kfree(xics->ics[i]);
        kfree(xics);
        kfree(dev);
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
        struct kvmppc_xics *xics;
        struct kvm *kvm = dev->kvm;
        int ret = 0;

        xics = kzalloc(sizeof(*xics), GFP_KERNEL);
        if (!xics)
                return -ENOMEM;

        dev->private = xics;
        xics->dev = dev;
        xics->kvm = kvm;

        /* Already there ? */
        mutex_lock(&kvm->lock);
        if (kvm->arch.xics)
                ret = -EEXIST;
        else
                kvm->arch.xics = xics;
        mutex_unlock(&kvm->lock);

        if (ret) {
                kfree(xics);
                return ret;
        }

        xics_debugfs_init(xics);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                /* Enable real mode support */
                xics->real_mode = ENABLE_REALMODE;
                xics->real_mode_dbg = DEBUG_REALMODE;
        }
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

        return 0;
}

struct kvm_device_ops kvm_xics_ops = {
        .name = "kvm-xics",
        .create = kvmppc_xics_create,
        .destroy = kvmppc_xics_free,
        .set_attr = xics_set_attr,
        .get_attr = xics_get_attr,
        .has_attr = xics_has_attr,
};

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
                             u32 xcpu)
{
        struct kvmppc_xics *xics = dev->private;
        int r = -EBUSY;

        if (dev->ops != &kvm_xics_ops)
                return -EPERM;
        if (xics->kvm != vcpu->kvm)
                return -EPERM;
        if (vcpu->arch.irq_type)
                return -EBUSY;

        r = kvmppc_xics_create_icp(vcpu, xcpu);
        if (!r)
                vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

        return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
        if (!vcpu->arch.icp)
                return;
        kfree(vcpu->arch.icp);
        vcpu->arch.icp = NULL;
        vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
                        struct kvm *kvm, int irq_source_id, int level,
                        bool line_status)
{
        return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
        entries->gsi = gsi;
        entries->type = KVM_IRQ_ROUTING_IRQCHIP;
        entries->set = xics_set_irq;
        entries->irqchip.irqchip = 0;
        entries->irqchip.pin = gsi;
        return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        return pin;
}