book3s_xics.c

/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>

#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false
/*
 * LOCKING
 * =======
 *
 * Each ICS has a mutex protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */
/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq);

/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
        struct ics_irq_state *state;
        struct kvmppc_ics *ics;
        u16 src;

        XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics) {
                XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
                return -EINVAL;
        }
        state = &ics->irq_state[src];
        if (!state->exists)
                return -EINVAL;

        /*
         * We set state->asserted locklessly. This should be fine as
         * we are the only setter, thus concurrent access is undefined
         * to begin with.
         */
        if (level == 1 || level == KVM_INTERRUPT_SET_LEVEL)
                state->asserted = 1;
        else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
                state->asserted = 0;
                return 0;
        }

        /* Attempt delivery */
        icp_deliver_irq(xics, NULL, irq);

        return 0;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
                             struct kvmppc_icp *icp)
{
        int i;

        mutex_lock(&ics->lock);

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                struct ics_irq_state *state = &ics->irq_state[i];

                if (!state->resend)
                        continue;

                XICS_DBG("resend %#x prio %#x\n", state->number,
                         state->priority);

                mutex_unlock(&ics->lock);
                icp_deliver_irq(xics, icp, state->number);
                mutex_lock(&ics->lock);
        }

        mutex_unlock(&ics->lock);
}
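
/*
 * Update the server/priority (XIVE) of one interrupt source under the
 * ICS lock. Returns true when the caller should attempt a delivery,
 * i.e. the source is being set to a non-masked priority while an
 * interrupt is pending or a resend is outstanding.
 */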
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
                       struct ics_irq_state *state,
                       u32 server, u32 priority, u32 saved_priority)
{
        bool deliver;

        mutex_lock(&ics->lock);

        state->server = server;
        state->priority = priority;
        state->saved_priority = saved_priority;
        deliver = false;
        if ((state->masked_pending || state->resend) && priority != MASKED) {
                state->masked_pending = 0;
                deliver = true;
        }

        mutex_unlock(&ics->lock);

        return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_icp *icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        icp = kvmppc_xics_find_server(kvm, server);
        if (!icp)
                return -EINVAL;

        XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
                 irq, server, priority,
                 state->masked_pending, state->resend);

        if (write_xive(xics, ics, state, server, priority, priority))
                icp_deliver_irq(xics, icp, irq);

        return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        mutex_lock(&ics->lock);
        *server = state->server;
        *priority = state->priority;
        mutex_unlock(&ics->lock);

        return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_icp *icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        icp = kvmppc_xics_find_server(kvm, state->server);
        if (!icp)
                return -EINVAL;

        if (write_xive(xics, ics, state, state->server, state->saved_priority,
                       state->saved_priority))
                icp_deliver_irq(xics, icp, irq);

        return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        write_xive(xics, ics, state, state->server, MASKED, state->priority);

        return 0;
}

/* -- ICP routines, including hcalls -- */
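
/*
 * Try to atomically replace the whole ICP state word with "new", provided
 * it still reads as "old". The derived out_ee bit (should an external
 * interrupt be signalled to the vcpu?) is recomputed here; if it ends up
 * set, an EXTERNAL_LEVEL interrupt is queued on the vcpu, and the vcpu is
 * kicked when the update was made on behalf of another processor
 * (change_self == false). Callers loop until this returns true.
 */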
static inline bool icp_try_update(struct kvmppc_icp *icp,
                                  union kvmppc_icp_state old,
                                  union kvmppc_icp_state new,
                                  bool change_self)
{
        bool success;

        /* Calculate new output value */
        new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

        /* Attempt atomic update */
        success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
        if (!success)
                goto bail;

        XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
                 icp->server_num,
                 old.cppr, old.mfrr, old.pending_pri, old.xisr,
                 old.need_resend, old.out_ee);
        XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
                 new.cppr, new.mfrr, new.pending_pri, new.xisr,
                 new.need_resend, new.out_ee);

        /*
         * Check for output state update
         *
         * Note that this is racy since another processor could be updating
         * the state already. This is why we never clear the interrupt output
         * here, we only ever set it. The clear only happens prior to doing
         * an update and only by the processor itself. Currently we do it
         * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
         *
         * We also do not try to figure out whether the EE state has changed,
         * we unconditionally set it if the new state calls for it. The reason
         * for that is that we opportunistically remove the pending interrupt
         * flag when raising CPPR, so we need to set it back here if an
         * interrupt is still pending.
         */
        if (new.out_ee) {
                kvmppc_book3s_queue_irqprio(icp->vcpu,
                                            BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
                if (!change_self)
                        kvmppc_fast_vcpu_kick(icp->vcpu);
        }
bail:
        return success;
}
static void icp_check_resend(struct kvmppc_xics *xics,
                             struct kvmppc_icp *icp)
{
        u32 icsid;

        /* Order this load with the test for need_resend in the caller */
        smp_rmb();
        for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
                struct kvmppc_ics *ics = xics->ics[icsid];

                if (!test_and_clear_bit(icsid, icp->resend_map))
                        continue;
                if (!ics)
                        continue;
                ics_check_resend(xics, ics, icp);
        }
}
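
/*
 * An interrupt can only be delivered to the ICP if it is more favoured
 * (numerically lower) than the current CPPR, MFRR and any already-pending
 * priority. On success the new source displaces whatever was in XISR,
 * which is returned through *reject so the caller can re-deliver it; on
 * failure need_resend is latched so a later CPPR change retries delivery.
 */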
static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
                               u32 *reject)
{
        union kvmppc_icp_state old_state, new_state;
        bool success;

        XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
                 icp->server_num);

        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                *reject = 0;

                /* See if we can deliver */
                success = new_state.cppr > priority &&
                          new_state.mfrr > priority &&
                          new_state.pending_pri > priority;

                /*
                 * If we can, check for a rejection and perform the
                 * delivery
                 */
                if (success) {
                        *reject = new_state.xisr;
                        new_state.xisr = irq;
                        new_state.pending_pri = priority;
                } else {
                        /*
                         * If we failed to deliver we set need_resend
                         * so a subsequent CPPR state change causes us
                         * to try a new delivery.
                         */
                        new_state.need_resend = true;
                }

        } while (!icp_try_update(icp, old_state, new_state, false));

        return success;
}
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq)
{
        struct ics_irq_state *state;
        struct kvmppc_ics *ics;
        u32 reject;
        u16 src;

        /*
         * This is used both for initial delivery of an interrupt and
         * for subsequent rejection.
         *
         * Rejection can be racy vs. resends. We have evaluated the
         * rejection in an atomic ICP transaction which is now complete,
         * so potentially the ICP can already accept the interrupt again.
         *
         * So we need to retry the delivery. Essentially the reject path
         * boils down to a failed delivery. Always.
         *
         * Now the interrupt could also have moved to a different target,
         * thus we may need to re-do the ICP lookup as well
         */
again:
        /* Get the ICS state and lock it */
        ics = kvmppc_xics_find_ics(xics, new_irq, &src);
        if (!ics) {
                XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
                return;
        }
        state = &ics->irq_state[src];

        /* Get a lock on the ICS */
        mutex_lock(&ics->lock);

        /* Get our server */
        if (!icp || state->server != icp->server_num) {
                icp = kvmppc_xics_find_server(xics->kvm, state->server);
                if (!icp) {
                        pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
                                new_irq, state->server);
                        goto out;
                }
        }

        /* Clear the resend bit of that interrupt */
        state->resend = 0;

        /*
         * If masked, bail out
         *
         * Note: PAPR doesn't mention anything about masked pending
         * when doing a resend, only when doing a delivery.
         *
         * However that would have the effect of losing a masked
         * interrupt that was rejected and isn't consistent with
         * the whole masked_pending business which is about not
         * losing interrupts that occur while masked.
         *
         * I don't differentiate normal deliveries and resends, this
         * implementation will differ from PAPR and not lose such
         * interrupts.
         */
        if (state->priority == MASKED) {
                XICS_DBG("irq %#x masked pending\n", new_irq);
                state->masked_pending = 1;
                goto out;
        }

        /*
         * Try the delivery, this will set the need_resend flag
         * in the ICP as part of the atomic transaction if the
         * delivery is not possible.
         *
         * Note that if successful, the new delivery might have itself
         * rejected an interrupt that was "delivered" before we took the
         * icp mutex.
         *
         * In this case we do the whole sequence all over again for the
         * new guy. We cannot assume that the rejected interrupt is less
         * favored than the new one, and thus doesn't need to be delivered,
         * because by the time we exit icp_try_to_deliver() the target
         * processor may well have already consumed & completed it, and thus
         * the rejected interrupt might actually be already acceptable.
         */
        if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
                /*
                 * Delivery was successful, did we reject somebody else ?
                 */
                if (reject && reject != XICS_IPI) {
                        mutex_unlock(&ics->lock);
                        new_irq = reject;
                        goto again;
                }
        } else {
                /*
                 * We failed to deliver the interrupt, so we need to set
                 * the resend map bit and mark the ICS state as needing
                 * a resend.
                 */
                set_bit(ics->icsid, icp->resend_map);
                state->resend = 1;

                /*
                 * If the need_resend flag got cleared in the ICP some time
                 * between icp_try_to_deliver() atomic update and now, then
                 * we know it might have missed the resend_map bit. So we
                 * retry
                 */
                smp_mb();
                if (!icp->state.need_resend) {
                        mutex_unlock(&ics->lock);
                        goto again;
                }
        }
out:
        mutex_unlock(&ics->lock);
}
static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                          u8 new_cppr)
{
        union kvmppc_icp_state old_state, new_state;
        bool resend;

        /*
         * This handles several related states in one operation:
         *
         * ICP State: Down_CPPR
         *
         * Load CPPR with new value and if the XISR is 0
         * then check for resends:
         *
         * ICP State: Resend
         *
         * If MFRR is more favored than CPPR, check for IPIs
         * and notify ICS of a potential resend. This is done
         * asynchronously (when used in real mode, we will have
         * to exit here).
         *
         * We do not handle the complete Check_IPI as documented
         * here. In the PAPR, this state will be used for both
         * Set_MFRR and Down_CPPR. However, we know that we aren't
         * changing the MFRR state here so we don't need to handle
         * the case of an MFRR causing a reject of a pending irq,
         * this will have been handled when the MFRR was set in the
         * first place.
         *
         * Thus we don't have to handle rejects, only resends.
         *
         * When implementing real mode for HV KVM, resend will lead to
         * a H_TOO_HARD return and the whole transaction will be handled
         * in virtual mode.
         */
        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                /* Down_CPPR */
                new_state.cppr = new_cppr;

                /*
                 * Cut down Resend / Check_IPI / IPI
                 *
                 * The logic is that we cannot have a pending interrupt
                 * trumped by an IPI at this point (see above), so we
                 * know that either the pending interrupt is already an
                 * IPI (in which case we don't care to override it) or
                 * it's either more favored than us or non-existent
                 */
                if (new_state.mfrr < new_cppr &&
                    new_state.mfrr <= new_state.pending_pri) {
                        WARN_ON(new_state.xisr != XICS_IPI &&
                                new_state.xisr != 0);
                        new_state.pending_pri = new_state.mfrr;
                        new_state.xisr = XICS_IPI;
                }

                /* Latch/clear resend bit */
                resend = new_state.need_resend;
                new_state.need_resend = 0;

        } while (!icp_try_update(icp, old_state, new_state, true));

        /*
         * Now handle resend checks. Those are asynchronous to the ICP
         * state update in HW (ie bus transactions) so we can handle them
         * separately here too
         */
        if (resend)
                icp_check_resend(xics, icp);
}
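
/*
 * The XIRR value returned to the guest packs the previous CPPR into the
 * top 8 bits and the accepted interrupt source number into the low 24
 * bits, mirroring the layout of the XIRR register on a real XICS
 * presentation controller.
 */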
static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 xirr;

        /* First, remove EE from the processor */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        /*
         * ICP State: Accept_Interrupt
         *
         * Return the pending interrupt (if any) along with the
         * current CPPR, then clear the XISR & set CPPR to the
         * pending priority
         */
        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
                if (!old_state.xisr)
                        break;
                new_state.cppr = new_state.pending_pri;
                new_state.pending_pri = 0xff;
                new_state.xisr = 0;

        } while (!icp_try_update(icp, old_state, new_state, true));

        XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

        return xirr;
}
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                                 unsigned long mfrr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp;
        u32 reject;
        bool resend;
        bool local;

        XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
                 vcpu->vcpu_id, server, mfrr);

        icp = vcpu->arch.icp;
        local = icp->server_num == server;
        if (!local) {
                icp = kvmppc_xics_find_server(vcpu->kvm, server);
                if (!icp)
                        return H_PARAMETER;
        }

        /*
         * ICP state: Set_MFRR
         *
         * If the CPPR is more favored than the new MFRR, then
         * nothing needs to be rejected as there can be no XISR to
         * reject. If the MFRR is being made less favored then
         * there might be a previously-rejected interrupt needing
         * to be resent.
         *
         * ICP state: Check_IPI
         *
         * If the CPPR is less favored, then we might be replacing
         * an interrupt, and thus need to possibly reject it.
         *
         * ICP State: IPI
         *
         * Besides rejecting any pending interrupts, we also
         * update XISR and pending_pri to mark IPI as pending.
         *
         * PAPR does not describe this state, but if the MFRR is being
         * made less favored than its earlier value, there might be
         * a previously-rejected interrupt needing to be resent.
         * Ideally, we would want to resend only if
         *	prio(pending_interrupt) < mfrr &&
         *	prio(pending_interrupt) < cppr
         * where pending interrupt is the one that was rejected. But
         * we don't have that state, so we simply trigger a resend
         * whenever the MFRR is made less favored.
         */
        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                /* Set_MFRR */
                new_state.mfrr = mfrr;

                /* Check_IPI */
                reject = 0;
                resend = false;
                if (mfrr < new_state.cppr) {
                        /* Reject a pending interrupt if not an IPI */
                        if (mfrr <= new_state.pending_pri) {
                                reject = new_state.xisr;
                                new_state.pending_pri = mfrr;
                                new_state.xisr = XICS_IPI;
                        }
                }

                if (mfrr > old_state.mfrr) {
                        resend = new_state.need_resend;
                        new_state.need_resend = 0;
                }
        } while (!icp_try_update(icp, old_state, new_state, local));

        /* Handle reject */
        if (reject && reject != XICS_IPI)
                icp_deliver_irq(xics, icp, reject);

        /* Handle resend */
        if (resend)
                icp_check_resend(xics, icp);

        return H_SUCCESS;
}
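
/*
 * H_IPOLL is a non-destructive peek at the target ICP: it returns the
 * XIRR-formatted CPPR/XISR pair in r4 and the current MFRR in r5
 * without accepting the interrupt or changing any ICP state.
 */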
static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
        union kvmppc_icp_state state;
        struct kvmppc_icp *icp;

        icp = vcpu->arch.icp;
        if (icp->server_num != server) {
                icp = kvmppc_xics_find_server(vcpu->kvm, server);
                if (!icp)
                        return H_PARAMETER;
        }
        state = ACCESS_ONCE(icp->state);
        kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
        kvmppc_set_gpr(vcpu, 5, state.mfrr);
        return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 reject;

        XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

        /*
         * ICP State: Set_CPPR
         *
         * We can safely compare the new value with the current
         * value outside of the transaction as the CPPR is only
         * ever changed by the processor on itself
         */
        if (cppr > icp->state.cppr)
                icp_down_cppr(xics, icp, cppr);
        else if (cppr == icp->state.cppr)
                return;

        /*
         * ICP State: Up_CPPR
         *
         * The processor is raising its priority, this can result
         * in a rejection of a pending interrupt:
         *
         * ICP State: Reject_Current
         *
         * We can remove EE from the current processor, the update
         * transaction will set it again if needed
         */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                reject = 0;
                new_state.cppr = cppr;

                if (cppr <= new_state.pending_pri) {
                        reject = new_state.xisr;
                        new_state.xisr = 0;
                        new_state.pending_pri = 0xff;
                }

        } while (!icp_try_update(icp, old_state, new_state, true));

        /*
         * Check for rejects. They are handled by doing a new delivery
         * attempt (see comments in icp_deliver_irq).
         */
        if (reject && reject != XICS_IPI)
                icp_deliver_irq(xics, icp, reject);
}
static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u32 irq = xirr & 0x00ffffff;
        u16 src;

        XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

        /*
         * ICP State: EOI
         *
         * Note: If EOI is incorrectly used by SW to lower the CPPR
         * value (ie more favored), we do not check for rejection of
         * a pending interrupt, this is a SW error and PAPR specifies
         * that we don't have to deal with it.
         *
         * The sending of an EOI to the ICS is handled after the
         * CPPR update
         *
         * ICP State: Down_CPPR which we handle
         * in a separate function as it's shared with H_CPPR.
         */
        icp_down_cppr(xics, icp, xirr >> 24);

        /* IPIs have no EOI */
        if (irq == XICS_IPI)
                return H_SUCCESS;

        /*
         * EOI handling: If the interrupt is still asserted, we need to
         * resend it. We can take a lockless "peek" at the ICS state here.
         *
         * "Message" interrupts will never have "asserted" set
         */
        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics) {
                XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
                return H_PARAMETER;
        }
        state = &ics->irq_state[src];

        /* Still asserted, resend it */
        if (state->asserted)
                icp_deliver_irq(xics, icp, irq);

        kvm_notify_acked_irq(vcpu->kvm, 0, irq);

        return H_SUCCESS;
}
static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;

        XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
                 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

        if (icp->rm_action & XICS_RM_KICK_VCPU)
                kvmppc_fast_vcpu_kick(icp->rm_kick_target);
        if (icp->rm_action & XICS_RM_CHECK_RESEND)
                icp_check_resend(xics, icp->rm_resend_icp);
        if (icp->rm_action & XICS_RM_REJECT)
                icp_deliver_irq(xics, icp, icp->rm_reject);
        if (icp->rm_action & XICS_RM_NOTIFY_EOI)
                kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);

        icp->rm_action = 0;

        return H_SUCCESS;
}

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        unsigned long res;
        int rc = H_SUCCESS;

        /* Check if we have an ICP */
        if (!xics || !vcpu->arch.icp)
                return H_HARDWARE;

        /* These requests don't have real-mode implementations at present */
        switch (req) {
        case H_XIRR_X:
                res = kvmppc_h_xirr(vcpu);
                kvmppc_set_gpr(vcpu, 4, res);
                kvmppc_set_gpr(vcpu, 5, get_tb());
                return rc;
        case H_IPOLL:
                rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
                return rc;
        }

        /* Check for real mode returning too hard */
        if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
                return kvmppc_xics_rm_complete(vcpu, req);

        switch (req) {
        case H_XIRR:
                res = kvmppc_h_xirr(vcpu);
                kvmppc_set_gpr(vcpu, 4, res);
                break;
        case H_CPPR:
                kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
                break;
        case H_EOI:
                rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
                break;
        case H_IPI:
                rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
                                  kvmppc_get_gpr(vcpu, 5));
                break;
        }

        return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);


/* -- Initialisation code etc. -- */

static int xics_debug_show(struct seq_file *m, void *private)
{
        struct kvmppc_xics *xics = m->private;
        struct kvm *kvm = xics->kvm;
        struct kvm_vcpu *vcpu;
        int icsid, i;

        if (!kvm)
                return 0;

        seq_printf(m, "=========\nICP state\n=========\n");

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvmppc_icp *icp = vcpu->arch.icp;
                union kvmppc_icp_state state;

                if (!icp)
                        continue;

                state.raw = ACCESS_ONCE(icp->state.raw);
                seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
                           icp->server_num, state.xisr,
                           state.pending_pri, state.cppr, state.mfrr,
                           state.out_ee, state.need_resend);
        }

        for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
                struct kvmppc_ics *ics = xics->ics[icsid];

                if (!ics)
                        continue;

                seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
                           icsid);

                mutex_lock(&ics->lock);

                for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                        struct ics_irq_state *irq = &ics->irq_state[i];

                        seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
                                   irq->number, irq->server, irq->priority,
                                   irq->saved_priority, irq->asserted,
                                   irq->resend, irq->masked_pending);
                }
                mutex_unlock(&ics->lock);
        }
        return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
        .open = xics_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
        char *name;

        name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
        if (!name) {
                pr_err("%s: no memory for name\n", __func__);
                return;
        }

        xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
                                           xics, &xics_debug_fops);

        pr_debug("%s: created %s\n", __func__, name);
        kfree(name);
}
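
/*
 * Create the ICS covering the given interrupt number, or return the
 * existing one if another caller got there first. Creation is serialised
 * by kvm->lock; the smp_wmb() ensures the fully initialised structure is
 * visible before the xics->ics[] pointer that publishes it to lockless
 * readers.
 */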
static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
                                        struct kvmppc_xics *xics, int irq)
{
        struct kvmppc_ics *ics;
        int i, icsid;

        icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

        mutex_lock(&kvm->lock);

        /* ICS already exists - somebody else got here first */
        if (xics->ics[icsid])
                goto out;

        /* Create the ICS */
        ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
        if (!ics)
                goto out;

        mutex_init(&ics->lock);
        ics->icsid = icsid;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
                ics->irq_state[i].priority = MASKED;
                ics->irq_state[i].saved_priority = MASKED;
        }
        smp_wmb();
        xics->ics[icsid] = ics;

        if (icsid > xics->max_icsid)
                xics->max_icsid = icsid;

out:
        mutex_unlock(&kvm->lock);
        return xics->ics[icsid];
}

int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
        struct kvmppc_icp *icp;

        if (!vcpu->kvm->arch.xics)
                return -ENODEV;

        if (kvmppc_xics_find_server(vcpu->kvm, server_num))
                return -EEXIST;

        icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
        if (!icp)
                return -ENOMEM;

        icp->vcpu = vcpu;
        icp->server_num = server_num;
        icp->state.mfrr = MASKED;
        icp->state.pending_pri = MASKED;
        vcpu->arch.icp = icp;

        XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

        return 0;
}
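
/*
 * The ICP state is exposed to userspace as a single u64 one-reg value,
 * with CPPR, XISR, MFRR and the pending priority packed at the
 * KVM_REG_PPC_ICP_*_SHIFT positions. kvmppc_xics_get_icp() encodes it
 * and kvmppc_xics_set_icp() decodes and sanity-checks it on restore.
 */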
u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
        struct kvmppc_icp *icp = vcpu->arch.icp;
        union kvmppc_icp_state state;

        if (!icp)
                return 0;
        state = icp->state;
        return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
                ((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
                ((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
                ((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
        struct kvmppc_icp *icp = vcpu->arch.icp;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_ics *ics;
        u8 cppr, mfrr, pending_pri;
        u32 xisr;
        u16 src;
        bool resend;

        if (!icp || !xics)
                return -ENOENT;

        cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
        xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
                KVM_REG_PPC_ICP_XISR_MASK;
        mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
        pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

        /* Require the new state to be internally consistent */
        if (xisr == 0) {
                if (pending_pri != 0xff)
                        return -EINVAL;
        } else if (xisr == XICS_IPI) {
                if (pending_pri != mfrr || pending_pri >= cppr)
                        return -EINVAL;
        } else {
                if (pending_pri >= mfrr || pending_pri >= cppr)
                        return -EINVAL;
                ics = kvmppc_xics_find_ics(xics, xisr, &src);
                if (!ics)
                        return -EINVAL;
        }

        new_state.raw = 0;
        new_state.cppr = cppr;
        new_state.xisr = xisr;
        new_state.mfrr = mfrr;
        new_state.pending_pri = pending_pri;

        /*
         * Deassert the CPU interrupt request.
         * icp_try_update will reassert it if necessary.
         */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        /*
         * Note that if we displace an interrupt from old_state.xisr,
         * we don't mark it as rejected. We expect userspace to set
         * the state of the interrupt sources to be consistent with
         * the ICP states (either before or afterwards, which doesn't
         * matter). We do handle resends due to CPPR becoming less
         * favoured because that is necessary to end up with a
         * consistent state in the situation where userspace restores
         * the ICS states before the ICP states.
         */
        do {
                old_state = ACCESS_ONCE(icp->state);

                if (new_state.mfrr <= old_state.mfrr) {
                        resend = false;
                        new_state.need_resend = old_state.need_resend;
                } else {
                        resend = old_state.need_resend;
                        new_state.need_resend = 0;
                }
        } while (!icp_try_update(icp, old_state, new_state, false));

        if (resend)
                icp_check_resend(xics, icp);

        return 0;
}
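
/*
 * KVM_DEV_XICS_GRP_SOURCES attribute format: the low bits of the u64
 * carry the destination server number, the priority sits at
 * KVM_XICS_PRIORITY_SHIFT, and the KVM_XICS_MASKED / KVM_XICS_PENDING /
 * KVM_XICS_LEVEL_SENSITIVE flags describe the source state. The helpers
 * below translate between this format and ics_irq_state.
 */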
static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
        int ret;
        struct kvmppc_ics *ics;
        struct ics_irq_state *irqp;
        u64 __user *ubufp = (u64 __user *) addr;
        u16 idx;
        u64 val, prio;

        ics = kvmppc_xics_find_ics(xics, irq, &idx);
        if (!ics)
                return -ENOENT;

        irqp = &ics->irq_state[idx];
        mutex_lock(&ics->lock);
        ret = -ENOENT;
        if (irqp->exists) {
                val = irqp->server;
                prio = irqp->priority;
                if (prio == MASKED) {
                        val |= KVM_XICS_MASKED;
                        prio = irqp->saved_priority;
                }
                val |= prio << KVM_XICS_PRIORITY_SHIFT;
                if (irqp->asserted)
                        val |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
                else if (irqp->masked_pending || irqp->resend)
                        val |= KVM_XICS_PENDING;
                ret = 0;
        }
        mutex_unlock(&ics->lock);

        if (!ret && put_user(val, ubufp))
                ret = -EFAULT;

        return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
        struct kvmppc_ics *ics;
        struct ics_irq_state *irqp;
        u64 __user *ubufp = (u64 __user *) addr;
        u16 idx;
        u64 val;
        u8 prio;
        u32 server;

        if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
                return -ENOENT;

        ics = kvmppc_xics_find_ics(xics, irq, &idx);
        if (!ics) {
                ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
                if (!ics)
                        return -ENOMEM;
        }
        irqp = &ics->irq_state[idx];
        if (get_user(val, ubufp))
                return -EFAULT;

        server = val & KVM_XICS_DESTINATION_MASK;
        prio = val >> KVM_XICS_PRIORITY_SHIFT;
        if (prio != MASKED &&
            kvmppc_xics_find_server(xics->kvm, server) == NULL)
                return -EINVAL;

        mutex_lock(&ics->lock);
        irqp->server = server;
        irqp->saved_priority = prio;
        if (val & KVM_XICS_MASKED)
                prio = MASKED;
        irqp->priority = prio;
        irqp->resend = 0;
        irqp->masked_pending = 0;
        irqp->asserted = 0;
        if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
                irqp->asserted = 1;
        irqp->exists = 1;
        mutex_unlock(&ics->lock);

        if (val & KVM_XICS_PENDING)
                icp_deliver_irq(xics, NULL, irqp->number);

        return 0;
}
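
/*
 * Generic KVM irqchip entry points. GSIs map 1:1 to XICS source numbers
 * (see kvm_irq_map_gsi() below), so a line or MSI injection becomes a
 * delivery on the corresponding ICS source. MSIs are treated as edge
 * triggered: a request with level == 0 is not delivered.
 */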
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status)
{
        struct kvmppc_xics *xics = kvm->arch.xics;

        return ics_deliver_irq(xics, irq, level);
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status)
{
        if (!level)
                return -1;
        return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
                           level, line_status);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        struct kvmppc_xics *xics = dev->private;

        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                return xics_set_source(xics, attr->attr, attr->addr);
        }
        return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        struct kvmppc_xics *xics = dev->private;

        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                return xics_get_source(xics, attr->attr, attr->addr);
        }
        return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
                    attr->attr < KVMPPC_XICS_NR_IRQS)
                        return 0;
                break;
        }
        return -ENXIO;
}

static void kvmppc_xics_free(struct kvm_device *dev)
{
        struct kvmppc_xics *xics = dev->private;
        int i;
        struct kvm *kvm = xics->kvm;

        debugfs_remove(xics->dentry);

        if (kvm)
                kvm->arch.xics = NULL;

        for (i = 0; i <= xics->max_icsid; i++)
                kfree(xics->ics[i]);
        kfree(xics);
        kfree(dev);
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
        struct kvmppc_xics *xics;
        struct kvm *kvm = dev->kvm;
        int ret = 0;

        xics = kzalloc(sizeof(*xics), GFP_KERNEL);
        if (!xics)
                return -ENOMEM;

        dev->private = xics;
        xics->dev = dev;
        xics->kvm = kvm;

        /* Already there ? */
        mutex_lock(&kvm->lock);
        if (kvm->arch.xics)
                ret = -EEXIST;
        else
                kvm->arch.xics = xics;
        mutex_unlock(&kvm->lock);

        if (ret) {
                kfree(xics);
                return ret;
        }

        xics_debugfs_init(xics);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                /* Enable real mode support */
                xics->real_mode = ENABLE_REALMODE;
                xics->real_mode_dbg = DEBUG_REALMODE;
        }
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

        return 0;
}

struct kvm_device_ops kvm_xics_ops = {
        .name = "kvm-xics",
        .create = kvmppc_xics_create,
        .destroy = kvmppc_xics_free,
        .set_attr = xics_set_attr,
        .get_attr = xics_get_attr,
        .has_attr = xics_has_attr,
};

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
                             u32 xcpu)
{
        struct kvmppc_xics *xics = dev->private;
        int r = -EBUSY;

        if (dev->ops != &kvm_xics_ops)
                return -EPERM;
        if (xics->kvm != vcpu->kvm)
                return -EPERM;
        if (vcpu->arch.irq_type)
                return -EBUSY;

        r = kvmppc_xics_create_icp(vcpu, xcpu);
        if (!r)
                vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

        return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
        if (!vcpu->arch.icp)
                return;
        kfree(vcpu->arch.icp);
        vcpu->arch.icp = NULL;
        vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
                        struct kvm *kvm, int irq_source_id, int level,
                        bool line_status)
{
        return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}
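
/*
 * Default GSI routing: each GSI is reported as a single IRQCHIP entry
 * whose pin equals the GSI itself, so guest interrupt source numbers
 * pass through unchanged.
 */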
int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
        entries->gsi = gsi;
        entries->type = KVM_IRQ_ROUTING_IRQCHIP;
        entries->set = xics_set_irq;
        entries->irqchip.irqchip = 0;
        entries->irqchip.pin = gsi;
        return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        return pin;
}