vgic-v3-emul.c

/*
 * GICv3 distributor and redistributor emulation
 *
 * GICv3 emulation is currently only supported on a GICv3 host (because
 * we rely on the hardware's CPU interface virtualization support), but
 * supports both hardware with or without the optional GICv2 backwards
 * compatibility features.
 *
 * Limitations of the emulation:
 * (RAZ/WI: read as zero, write ignore, RAO/WI: read as one, write ignore)
 * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
 * - We do not support the message based interrupts (MBIs) triggered by
 *   writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
 * - We do not support the (optional) backwards compatibility feature.
 *   GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
 *   the compatibility feature, you can use a GICv2 in the guest, though.
 * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
 * - Priorities are not emulated (same as the GICv2 emulation). Linux
 *   as a guest is fine with this, because it does not use priorities.
 * - We only support Group1 interrupts. Again Linux uses only those.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <linux/irqchip/arm-gic-v3.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg = 0xffffffff;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}
static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg = 0;

        /*
         * Force ARE and DS to 1, the guest cannot change this.
         * For the time being we only support Group1 interrupts.
         */
        if (vcpu->kvm->arch.vgic.enabled)
                reg = GICD_CTLR_ENABLE_SS_G1;
        reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                if (reg & GICD_CTLR_ENABLE_SS_G0)
                        kvm_info("guest tried to enable unsupported Group0 interrupts\n");
                vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
                vgic_update_state(vcpu->kvm);
                return true;
        }
        return false;
}
/*
 * As this implementation does not provide compatibility
 * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
 * Also LPIs and MBIs are not supported, so we set the respective bits to 0.
 * Also we report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
 */
#define INTERRUPT_ID_BITS 10
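
/*
 * Illustration (added note, not part of the original comment): with the
 * fields written by handle_mmio_typer() below, a guest configured with
 * nr_irqs = 256 would read GICD_TYPER as
 *   ITLinesNumber (bits [4:0])   = (256 / 32) - 1 = 7
 *   IDbits        (bits [23:19]) = INTERRUPT_ID_BITS - 1 = 9
 * i.e. the value 0x00480007, advertising 256 implemented interrupt IDs
 * and 10-bit interrupt identifiers.
 */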
static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
                              struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;

        reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;

        reg |= (INTERRUPT_ID_BITS - 1) << 19;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}
static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;

        reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}
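
/*
 * Added note: in the distributor handlers below, offsets that fall into the
 * range of the 32 private interrupts (SGIs and PPIs) are treated as RAZ/WI.
 * With ARE=1 the per-CPU state of those interrupts is accessed through the
 * redistributor frames instead, handled further down in this file.
 */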
static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
                                            struct kvm_exit_mmio *mmio,
                                            phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                              vcpu->vcpu_id,
                                              ACCESS_WRITE_SETBIT);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
                                              struct kvm_exit_mmio *mmio,
                                              phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                              vcpu->vcpu_id,
                                              ACCESS_WRITE_CLEARBIT);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
                                             struct kvm_exit_mmio *mmio,
                                             phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
                                                   vcpu->vcpu_id);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
                                               struct kvm_exit_mmio *mmio,
                                               phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
                                                     vcpu->vcpu_id);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}
static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
                                          struct kvm_exit_mmio *mmio,
                                          phys_addr_t offset)
{
        u32 *reg;

        if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                   vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 *reg;

        if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                  vcpu->vcpu_id, offset >> 1);

        return vgic_handle_cfg_reg(reg, mmio, offset);
}
/*
 * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
 * when we store the target MPIDR written by the guest.
 */
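/*
 * Illustration (added note): the compressed format packs the four 8-bit
 * affinity fields into consecutive bytes:
 *   bits [7:0]   = Aff0, bits [15:8]  = Aff1,
 *   bits [23:16] = Aff2, bits [31:24] = Aff3
 * so e.g. an MPIDR with Aff3=0x01, Aff2=0x02, Aff1=0x03, Aff0=0x04
 * (raw affinity value 0x0000000100020304 on arm64, where Aff3 lives in
 * bits [39:32]) is stored as 0x01020304. uncompress_mpidr() needs the
 * (u64) cast for Aff3 because that field moves back above bit 31.
 */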
static u32 compress_mpidr(unsigned long mpidr)
{
        u32 ret;

        ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;

        return ret;
}

static unsigned long uncompress_mpidr(u32 value)
{
        unsigned long mpidr;

        mpidr  = ((value >>  0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((value >>  8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
        mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);

        return mpidr;
}
/*
 * Lookup the given MPIDR value to get the vcpu_id (if there is one)
 * and store that in the irq_spi_cpu[] array.
 * This limits the number of VCPUs to 255 for now, extending the data
 * type (or storing kvm_vcpu pointers) should lift the limit.
 * Store the original MPIDR value in an extra array to support read-as-written.
 * Unallocated MPIDRs are translated to a special value and caught
 * before any array accesses.
 */
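/*
 * Added note on the offset arithmetic below: each GICD_IROUTER<n> register
 * is 64 bits wide, so "offset / 8" yields the index into the SPI arrays.
 * The range registered for this handler starts at GICD_IROUTER + 0x100
 * (i.e. at interrupt ID 32), so index 0 corresponds to the first SPI;
 * routing for the 32 private interrupts is not configurable here.
 */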
static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int spi;
        u32 reg;
        int vcpu_id;
        unsigned long *bmap, mpidr;

        /*
         * The upper 32 bits of each 64 bit register are zero,
         * as we don't support Aff3.
         */
        if ((offset & 4)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        /* This region only covers SPIs, so no handling of private IRQs here. */
        spi = offset / 8;

        /* get the stored MPIDR for this IRQ */
        mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
        reg = mpidr;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);

        if (!mmio->is_write)
                return false;

        /*
         * Now clear the currently assigned vCPU from the map, making room
         * for the new one to be written below
         */
        vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
        if (likely(vcpu)) {
                vcpu_id = vcpu->vcpu_id;
                bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
                __clear_bit(spi, bmap);
        }

        dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
        vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);

        /*
         * The spec says that non-existent MPIDR values should not be
         * forwarded to any existent (v)CPU, but should be able to become
         * pending anyway. We simply keep the irq_spi_target[] array empty, so
         * the interrupt will never be injected.
         * irq_spi_cpu[irq] gets a magic value in this case.
         */
        if (likely(vcpu)) {
                vcpu_id = vcpu->vcpu_id;
                dist->irq_spi_cpu[spi] = vcpu_id;
                bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
                __set_bit(spi, bmap);
        } else {
                dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
        }

        vgic_update_state(kvm);

        return true;
}
/*
 * We should be careful about promising too much when a guest reads
 * this register. Don't claim to be like any hardware implementation,
 * but just report the GIC as version 3 - which is what a Linux guest
 * would check.
 */
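/*
 * Added note: the only ID register we populate is GICD_PIDR2, returned as
 * 0x3b. Bits [7:4] of PIDR2 hold the architecture revision, so the value 3
 * in that field identifies the GIC as a GICv3 to the guest; this is the
 * field a Linux guest actually looks at.
 */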
static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio,
                               phys_addr_t offset)
{
        u32 reg = 0;

        switch (offset + GICD_IDREGS) {
        case GICD_PIDR2:
                reg = 0x3b;
                break;
        }

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}
static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
        {
                .base = GICD_CTLR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_ctlr,
        },
        {
                .base = GICD_TYPER,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_typer,
        },
        {
                .base = GICD_IIDR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_iidr,
        },
        {
                /* this register is optional, it is RAZ/WI if not implemented */
                .base = GICD_STATUSR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this write only register is WI when TYPER.MBIS=0 */
                .base = GICD_SETSPI_NSR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this write only register is WI when TYPER.MBIS=0 */
                .base = GICD_CLRSPI_NSR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_SETSPI_SR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_CLRSPI_SR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_IGROUPR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_rao_wi,
        },
        {
                .base = GICD_ISENABLER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_enable_reg_dist,
        },
        {
                .base = GICD_ICENABLER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_enable_reg_dist,
        },
        {
                .base = GICD_ISPENDR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_pending_reg_dist,
        },
        {
                .base = GICD_ICPENDR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_pending_reg_dist,
        },
        {
                .base = GICD_ISACTIVER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_ICACTIVER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_IPRIORITYR,
                .len = 0x400,
                .bits_per_irq = 8,
                .handle_mmio = handle_mmio_priority_reg_dist,
        },
        {
                /* TARGETSRn is RES0 when ARE=1 */
                .base = GICD_ITARGETSR,
                .len = 0x400,
                .bits_per_irq = 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_ICFGR,
                .len = 0x100,
                .bits_per_irq = 2,
                .handle_mmio = handle_mmio_cfg_reg_dist,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_IGRPMODR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_NSACR,
                .len = 0x100,
                .bits_per_irq = 2,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base = GICD_SGIR,
                .len = 0x04,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base = GICD_CPENDSGIR,
                .len = 0x10,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base = GICD_SPENDSGIR,
                .len = 0x10,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_IROUTER + 0x100,
                .len = 0x1ee0,
                .bits_per_irq = 64,
                .handle_mmio = handle_mmio_route_reg,
        },
        {
                .base = GICD_IDREGS,
                .len = 0x30,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_idregs,
        },
        {},
};
static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
                                              struct kvm_exit_mmio *mmio,
                                              phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      redist_vcpu->vcpu_id,
                                      ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
                                                struct kvm_exit_mmio *mmio,
                                                phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      redist_vcpu->vcpu_id,
                                      ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
                                               struct kvm_exit_mmio *mmio,
                                               phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
                                           redist_vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
                                                 struct kvm_exit_mmio *mmio,
                                                 phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
                                             redist_vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
                                            struct kvm_exit_mmio *mmio,
                                            phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;
        u32 *reg;

        reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                   redist_vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                       redist_vcpu->vcpu_id, offset >> 1);

        return vgic_handle_cfg_reg(reg, mmio, offset);
}
static const struct kvm_mmio_range vgic_redist_sgi_ranges[] = {
        {
                .base = GICR_IGROUPR0,
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_rao_wi,
        },
        {
                .base = GICR_ISENABLER0,
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_enable_reg_redist,
        },
        {
                .base = GICR_ICENABLER0,
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_enable_reg_redist,
        },
        {
                .base = GICR_ISPENDR0,
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_pending_reg_redist,
        },
        {
                .base = GICR_ICPENDR0,
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_pending_reg_redist,
        },
        {
                .base = GICR_ISACTIVER0,
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICR_ICACTIVER0,
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICR_IPRIORITYR0,
                .len = 0x20,
                .bits_per_irq = 8,
                .handle_mmio = handle_mmio_priority_reg_redist,
        },
        {
                .base = GICR_ICFGR0,
                .len = 0x08,
                .bits_per_irq = 2,
                .handle_mmio = handle_mmio_cfg_reg_redist,
        },
        {
                .base = GICR_IGRPMODR0,
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICR_NSACR,
                .len = 0x04,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {},
};
static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
                                    struct kvm_exit_mmio *mmio,
                                    phys_addr_t offset)
{
        /* since we don't support LPIs, this register is zero for now */
        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}
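
/*
 * Added note on handle_mmio_typer_redist() below: GICR_TYPER is a 64-bit
 * register. The handler returns the (compressed) affinity value of the
 * redistributor's VCPU in the upper word, the VCPU id as the Processor
 * Number in bits [23:8] of the lower word, and sets GICR_TYPER_LAST for
 * the redistributor belonging to the highest-numbered VCPU, so the guest
 * knows where the contiguous redistributor region ends.
 */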
static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 reg;
        u64 mpidr;
        struct kvm_vcpu *redist_vcpu = mmio->private;
        int target_vcpu_id = redist_vcpu->vcpu_id;

        /* the upper 32 bits contain the affinity value */
        if ((offset & ~3) == 4) {
                mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
                reg = compress_mpidr(mpidr);

                vgic_reg_access(mmio, &reg, offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = redist_vcpu->vcpu_id << 8;
        if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
                reg |= GICR_TYPER_LAST;
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
        return false;
}
static const struct kvm_mmio_range vgic_redist_ranges[] = {
        {
                .base = GICR_CTLR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_ctlr_redist,
        },
        {
                .base = GICR_TYPER,
                .len = 0x08,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_typer_redist,
        },
        {
                .base = GICR_IIDR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_iidr,
        },
        {
                .base = GICR_WAKER,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICR_IDREGS,
                .len = 0x30,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_idregs,
        },
        {},
};
/*
 * This function splits accesses between the distributor and the two
 * redistributor parts (private/SPI). As each redistributor is accessible
 * from any CPU, we have to determine the affected VCPU by taking the faulting
 * address into account. We then pass this VCPU to the handler function via
 * the private parameter.
 */
#define SGI_BASE_OFFSET SZ_64K
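
/*
 * Added sketch of the address layout assumed below (per-VCPU redistributor
 * regions are laid out back to back, GIC_V3_REDIST_SIZE bytes each):
 *
 *   rdbase + vcpu_id * GIC_V3_REDIST_SIZE + 0x00000 : "RD" frame
 *                                                     (CTLR, TYPER, ...)
 *   rdbase + vcpu_id * GIC_V3_REDIST_SIZE + SZ_64K  : "SGI" frame
 *                                                     (per-IRQ registers
 *                                                      for SGIs and PPIs)
 */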
static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                struct kvm_exit_mmio *mmio)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long dbase = dist->vgic_dist_base;
        unsigned long rdbase = dist->vgic_redist_base;
        int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
        int vcpu_id;
        const struct kvm_mmio_range *mmio_range;

        if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
                return vgic_handle_mmio_range(vcpu, run, mmio,
                                              vgic_v3_dist_ranges, dbase);
        }

        if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
            GIC_V3_REDIST_SIZE * nrcpus))
                return false;

        vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
        rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
        mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);

        if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) {
                rdbase += SGI_BASE_OFFSET;
                mmio_range = vgic_redist_sgi_ranges;
        } else {
                mmio_range = vgic_redist_ranges;
        }
        return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
}
static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
        if (vgic_queue_irq(vcpu, 0, irq)) {
                vgic_dist_irq_clear_pending(vcpu, irq);
                vgic_cpu_irq_clear(vcpu, irq);
                return true;
        }

        return false;
}
static int vgic_v3_map_resources(struct kvm *kvm,
                                 const struct vgic_params *params)
{
        int ret = 0;
        struct vgic_dist *dist = &kvm->arch.vgic;

        if (!irqchip_in_kernel(kvm))
                return 0;

        mutex_lock(&kvm->lock);

        if (vgic_ready(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
                kvm_err("Need to set vgic distributor addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        /*
         * For a VGICv3 we require the userland to explicitly initialize
         * the VGIC before we need to use it.
         */
        if (!vgic_initialized(kvm)) {
                ret = -EBUSY;
                goto out;
        }

        kvm->arch.vgic.ready = true;
out:
        if (ret)
                kvm_vgic_destroy(kvm);
        mutex_unlock(&kvm->lock);
        return ret;
}
static int vgic_v3_init_model(struct kvm *kvm)
{
        int i;
        u32 mpidr;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;

        dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
                                      GFP_KERNEL);

        if (!dist->irq_spi_mpidr)
                return -ENOMEM;

        /* Initialize the target VCPUs for each IRQ to VCPU 0 */
        mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
        for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
                dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
                dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
                vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
        }

        return 0;
}
/* GICv3 does not keep track of SGI sources anymore. */
static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
}
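
/*
 * Added note: vgic_v3_init_emulation() below wires up the GICv3-specific
 * callbacks that the common vgic code dispatches through dist->vm_ops:
 * MMIO trap handling, SGI queueing and source tracking, and the model
 * initialization and resource mapping done at VM setup time.
 */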
void vgic_v3_init_emulation(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
        dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
        dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
        dist->vm_ops.init_model = vgic_v3_init_model;
        dist->vm_ops.map_resources = vgic_v3_map_resources;

        kvm->arch.max_vcpus = KVM_MAX_VCPUS;
}
/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
        unsigned long affinity;
        int level0;

        /*
         * Split the current VCPU's MPIDR into affinity level 0 and the
         * rest as this is what we have to compare against.
         */
        affinity = kvm_vcpu_get_mpidr_aff(vcpu);
        level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
        affinity &= ~MPIDR_LEVEL_MASK;

        /* bail out if the upper three levels don't match */
        if (sgi_aff != affinity)
                return -1;

        /* Is this VCPU's bit set in the mask ? */
        if (!(sgi_cpu_mask & BIT(level0)))
                return -1;

        return level0;
}

#define SGI_AFFINITY_LEVEL(reg, level) \
        ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
        >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
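
/*
 * Added note: ICC_SGI1R_EL1 encodes the affinity levels at different bit
 * positions than the MPIDR does. SGI_AFFINITY_LEVEL() above extracts one
 * affinity field from the register value and shifts it to the position the
 * same level occupies in an MPIDR, so the result can be compared directly
 * against kvm_vcpu_get_mpidr_aff() in match_mpidr().
 */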
/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting an SGI
 * @reg:  The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all VCPUs except
 * the calling one.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *c_vcpu;
        struct vgic_dist *dist = &kvm->arch.vgic;
        u16 target_cpus;
        u64 mpidr;
        int sgi, c;
        int vcpu_id = vcpu->vcpu_id;
        bool broadcast;
        int updated = 0;

        sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
        broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
        target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
        mpidr = SGI_AFFINITY_LEVEL(reg, 3);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

        /*
         * We take the dist lock here, because we come from the sysregs
         * code path and not from the MMIO one (which already takes the lock).
         */
        spin_lock(&dist->lock);

        /*
         * We iterate over all VCPUs to find the MPIDRs matching the request.
         * If we have handled one CPU, we clear its bit to detect early
         * if we are already finished. This avoids iterating through all
         * VCPUs when most of the time we just signal a single VCPU.
         */
        kvm_for_each_vcpu(c, c_vcpu, kvm) {
                /* Exit early if we have dealt with all requested CPUs */
                if (!broadcast && target_cpus == 0)
                        break;

                /* Don't signal the calling VCPU */
                if (broadcast && c == vcpu_id)
                        continue;

                if (!broadcast) {
                        int level0;

                        level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
                        if (level0 == -1)
                                continue;

                        /* remove this matching VCPU from the mask */
                        target_cpus &= ~BIT(level0);
                }

                /* Flag the SGI as pending */
                vgic_dist_irq_set_pending(c_vcpu, sgi);
                updated = 1;
                kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
        }
        if (updated)
                vgic_update_state(vcpu->kvm);
        spin_unlock(&dist->lock);
        if (updated)
                vgic_kick_vcpus(vcpu->kvm);
}
static int vgic_v3_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v3_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        }

        return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        }

        return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return -ENXIO;
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_v3_create,
        .destroy = vgic_v3_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};