vgic-mmio.c

/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"
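
/*
 * Trivial handlers for registers that carry no state: RAZ (read-as-zero),
 * RAO (read-as-ones) and WI (write-ignored).
 */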
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len)
{
        return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len)
{
        return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                unsigned int len, unsigned long val)
{
        /* Ignore */
}
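
/*
 * Note: VGIC_ADDR_TO_INTID(addr, bits) (see vgic-mmio.h) translates the byte
 * offset of an access into the first interrupt ID covered by that access,
 * given the number of register bits used per interrupt.
 */
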
/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->enabled)
                        value |= (1U << i);
                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}
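
/*
 * GICD_ISENABLER is write-1-to-set and GICD_ICENABLER is write-1-to-clear:
 * only the bits set in the written value take effect, which is why the
 * handlers below walk the set bits only.
 */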
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->enabled = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->enabled = false;
                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                unsigned long flags;

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq_is_pending(irq))
                        value |= (1U << i);
                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
        struct kvm_vcpu *vcpu;

        preempt_disable();
        vcpu = kvm_arm_get_running_vcpu();
        preempt_enable();

        return vcpu;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = true;
        vgic_irq_set_phys_active(irq, true);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw)
                        vgic_hw_irq_spending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = false;

        /*
         * We don't want the guest to effectively mask the physical
         * interrupt by doing a write to SPENDR followed by a write to
         * CPENDR for HW interrupts, so we clear the active state on
         * the physical side if the virtual interrupt is not active.
         * This may lead to taking an additional interrupt on the
         * host, but that should not be a problem as the worst that
         * can happen is an additional vgic injection. We also clear
         * the pending state to maintain proper semantics for edge HW
         * interrupts.
         */
        vgic_irq_set_phys_pending(irq, false);
        if (!irq->active)
                vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw)
                        vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = false;
                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->active)
                        value |= (1U << i);
                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                bool active, bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->active = active;
        vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                bool active)
{
        unsigned long flags;
        struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

        spin_lock_irqsave(&irq->irq_lock, flags);

        /*
         * If this virtual IRQ was written into a list register, we
         * have to make sure the CPU that runs the VCPU thread has
         * synced back the LR state to the struct vgic_irq.
         *
         * As long as the conditions below are true, we know the VCPU thread
         * may be on its way back from the guest (we kicked the VCPU thread in
         * vgic_change_active_prepare) and still has to sync back this IRQ,
         * so we release and re-acquire the spin_lock to let the other thread
         * sync back the IRQ.
         *
         * When accessing VGIC state from user space, requester_vcpu is
         * NULL, which is fine, because we guarantee that no VCPUs are running
         * when accessing VGIC state from user space so irq->vcpu->cpu is
         * always -1.
         */
        while (irq->vcpu && /* IRQ may have state in an LR somewhere */
               irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
               irq->vcpu->cpu != -1) /* VCPU thread is running */
                cond_resched_lock(&irq->irq_lock);

        if (irq->hw)
                vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
        else
                irq->active = active;

        if (irq->active)
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        else
                spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
        if (intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
        if (intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_resume_guest(vcpu->kvm);
}
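
/*
 * The guest-facing ISACTIVER/ICACTIVER handlers below wrap the actual state
 * change with vgic_change_active_prepare()/_finish() under the kvm->lock
 * mutex; the uaccess variants skip that step, because userspace accesses
 * already guarantee that no VCPUs are running (see the comment above).
 */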
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                vgic_mmio_change_active(vcpu, irq, false);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_cactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                vgic_mmio_change_active(vcpu, irq, true);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_sactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->priority << (i * 8);
                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}
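
/*
 * Priorities are held in the upper VGIC_PRI_BITS bits of each byte-wide
 * field; with VGIC_PRI_BITS == 5 (as defined in <kvm/arm_vgic.h>) the
 * narrowing mask used below is GENMASK(7, 3), i.e. 0xf8.
 */
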
/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                /* Narrow the priority range to what we actually support */
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}
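
/*
 * GICD_ICFGR uses two configuration bits per interrupt; bit 1 of each field
 * selects edge-triggered (1) vs. level-sensitive (0). That is why the read
 * handler reports 2 for edge interrupts and the write handler tests bit
 * (i * 2 + 1) of the written value.
 */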
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        u32 value = 0;
        int i;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->config == VGIC_CONFIG_EDGE)
                        value |= (2U << (i * 2));
                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                gpa_t addr, unsigned int len,
                unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq;

                /*
                 * The configuration cannot be changed for SGIs in general,
                 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
                 * code relies on PPIs being level triggered, so we also
                 * make them read-only here.
                 */
                if (intid + i < VGIC_NR_PRIVATE_IRQS)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                spin_lock_irqsave(&irq->irq_lock, flags);

                if (test_bit(i * 2 + 1, &val))
                        irq->config = VGIC_CONFIG_EDGE;
                else
                        irq->config = VGIC_CONFIG_LEVEL;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
        int i;
        u64 val = 0;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
                        val |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
                const u64 val)
{
        int i;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
        unsigned long flags;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;
                bool new_level;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /*
                 * Line level is set irrespective of irq type
                 * (level or edge) to avoid dependency that VM should
                 * restore irq config before line level.
                 */
                new_level = !!(val & (1U << i));
                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->line_level = new_level;
                if (new_level)
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                else
                        spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}
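
/*
 * The register region tables are sorted by reg_offset, so a register access
 * can be resolved with a binary search over them.
 */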
static int match_region(const void *key, const void *elt)
{
        const unsigned int offset = (unsigned long)key;
        const struct vgic_register_region *region = elt;

        if (offset < region->reg_offset)
                return -1;
        if (offset >= region->reg_offset + region->len)
                return 1;

        return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
                      int nr_regions, unsigned int offset)
{
        return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
                       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_vmcr(vcpu, vmcr);
        else
                vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_get_vmcr(vcpu, vmcr);
        else
                vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
        unsigned long data = kvm_mmio_read_buf(val, len);

        switch (len) {
        case 1:
                return data;
        case 2:
                return le16_to_cpu(data);
        case 4:
                return le32_to_cpu(data);
        default:
                return le64_to_cpu(data);
        }
}
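
/*
 * Example: a 4-byte guest store of 0x01020304 is observed in the buffer as
 * the byte sequence 04 03 02 01 (little endian); le32_to_cpu() turns that
 * back into the native value 0x01020304 regardless of host endianness, and
 * cpu_to_le32() performs the inverse conversion on the way out.
 */
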
/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
                unsigned long data)
{
        switch (len) {
        case 1:
                break;
        case 2:
                data = cpu_to_le16(data);
                break;
        case 4:
                data = cpu_to_le32(data);
                break;
        default:
                data = cpu_to_le64(data);
        }

        kvm_mmio_write_buf(buf, len, data);
}

static struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
        return container_of(dev, struct vgic_io_device, dev);
}
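
/*
 * check_region() validates an access against a register region: the access
 * width must be one the region advertises, the address must be naturally
 * aligned, and for per-IRQ registers the offset must not address an IRQ
 * beyond the number actually allocated for this VM.
 */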
static bool check_region(const struct kvm *kvm,
                const struct vgic_register_region *region,
                gpa_t addr, int len)
{
        int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        switch (len) {
        case sizeof(u8):
                flags = VGIC_ACCESS_8bit;
                break;
        case sizeof(u32):
                flags = VGIC_ACCESS_32bit;
                break;
        case sizeof(u64):
                flags = VGIC_ACCESS_64bit;
                break;
        default:
                return false;
        }

        if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
                if (!region->bits_per_irq)
                        return true;

                /* Do we access a non-allocated IRQ? */
                return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
        }

        return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len)
{
        const struct vgic_register_region *region;

        region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
                                       addr - iodev->base_addr);
        if (!region || !check_region(vcpu->kvm, region, addr, len))
                return NULL;

        return region;
}
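
/*
 * Userspace (uaccess) accesses are always 32 bits wide and prefer a region's
 * dedicated uaccess_read/uaccess_write handlers, falling back to the normal
 * MMIO handlers when none are provided.
 */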
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                gpa_t addr, u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region) {
                *val = 0;
                return 0;
        }

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_read)
                *val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
        else
                *val = region->read(r_vcpu, addr, sizeof(u32));

        return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                gpa_t addr, const u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region)
                return 0;

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_write)
                region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
        else
                region->write(r_vcpu, addr, sizeof(u32), *val);

        return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
                 bool is_write, int offset, u32 *val)
{
        if (is_write)
                return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
        else
                return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}
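
/*
 * dispatch_mmio_read()/dispatch_mmio_write() are the kvm_io_device_ops
 * callbacks (see kvm_io_gic_ops below) invoked for guest accesses to a VGIC
 * MMIO frame registered on KVM_MMIO_BUS; they look up the matching register
 * region and forward the access to its handler.
 */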
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                gpa_t addr, int len, void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = 0;

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region) {
                memset(val, 0, len);
                return 0;
        }

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_DIST:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_REDIST:
                data = region->read(iodev->redist_vcpu, addr, len);
                break;
        case IODEV_ITS:
                data = region->its_read(vcpu->kvm, iodev->its, addr, len);
                break;
        }

        vgic_data_host_to_mmio_bus(val, len, data);
        return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                gpa_t addr, int len, const void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = vgic_data_mmio_bus_to_host(val, len);

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region)
                return 0;

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_DIST:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_REDIST:
                region->write(iodev->redist_vcpu, addr, len, data);
                break;
        case IODEV_ITS:
                region->its_write(vcpu->kvm, iodev->its, addr, len, data);
                break;
        }

        return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
        .read = dispatch_mmio_read,
        .write = dispatch_mmio_write,
};
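
/*
 * Register the distributor frame of the given VGIC model as an in-kernel
 * MMIO device on the KVM MMIO bus.
 */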
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                enum vgic_type type)
{
        struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
        int ret = 0;
        unsigned int len;

        switch (type) {
        case VGIC_V2:
                len = vgic_v2_init_dist_iodev(io_device);
                break;
        case VGIC_V3:
                len = vgic_v3_init_dist_iodev(io_device);
                break;
        default:
                BUG_ON(1);
        }

        io_device->base_addr = dist_base_address;
        io_device->iodev_type = IODEV_DIST;
        io_device->redist_vcpu = NULL;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
                                      len, &io_device->dev);
        mutex_unlock(&kvm->slots_lock);

        return ret;
}