vgic-v3-emul.c

/*
 * GICv3 distributor and redistributor emulation
 *
 * GICv3 emulation is currently only supported on a GICv3 host (because
 * we rely on the hardware's CPU interface virtualization support), but
 * it works on hosts both with and without the optional GICv2 backwards
 * compatibility features.
 *
 * Limitations of the emulation:
 * (RAZ/WI: read as zero, writes ignored; RAO/WI: read as one, writes ignored)
 * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
 * - We do not support the message-based interrupts (MBIs) triggered by
 *   writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
 * - We do not support the (optional) backwards compatibility feature.
 *   GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
 *   the compatibility feature, you can use a GICv2 in the guest, though.
 * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
 * - Priorities are not emulated (same as the GICv2 emulation). Linux
 *   as a guest is fine with this, because it does not use priorities.
 * - We only support Group1 interrupts. Again, Linux uses only those.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <linux/irqchip/arm-gic-v3.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
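
/*
 * RAO/WI handler: reads return all ones and writes are ignored. This is
 * used for the group registers below; since only Group1 interrupts are
 * supported, every interrupt's group bit reads as 1.
 */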
static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg = 0xffffffff;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}

static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg = 0;

        /*
         * Force ARE and DS to 1, the guest cannot change this.
         * For the time being we only support Group1 interrupts.
         */
        if (vcpu->kvm->arch.vgic.enabled)
                reg = GICD_CTLR_ENABLE_SS_G1;
        reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                if (reg & GICD_CTLR_ENABLE_SS_G0)
                        kvm_info("guest tried to enable unsupported Group0 interrupts\n");
                vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
                vgic_update_state(vcpu->kvm);
                return true;
        }
        return false;
}

/*
 * As this implementation does not provide compatibility
 * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
 * LPIs and MBIs are not supported, so we set the respective bits to 0.
 * We also report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
 */
#define INTERRUPT_ID_BITS 10
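
/*
 * GICD_TYPER layout (for the fields we emulate): ITLinesNumber in bits
 * [4:0] holds (number of supported interrupt IDs / 32) - 1, and IDbits in
 * bits [23:19] holds the number of interrupt ID bits minus one.
 */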
static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
                              struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;

        reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;
        reg |= (INTERRUPT_ID_BITS - 1) << 19;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}

static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;

        reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}
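
/*
 * The per-interrupt distributor registers below only handle SPIs: the
 * first 32 interrupt IDs (SGIs and PPIs) are private to each core and are
 * accessed through the redistributor's SGI frame instead, so that part of
 * the distributor register space is RAZ/WI here.
 */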
static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
                                            struct kvm_exit_mmio *mmio,
                                            phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                              vcpu->vcpu_id,
                                              ACCESS_WRITE_SETBIT);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
                                              struct kvm_exit_mmio *mmio,
                                              phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                              vcpu->vcpu_id,
                                              ACCESS_WRITE_CLEARBIT);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
                                             struct kvm_exit_mmio *mmio,
                                             phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
                                                   vcpu->vcpu_id);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
                                               struct kvm_exit_mmio *mmio,
                                               phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
                                                     vcpu->vcpu_id);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
                                          struct kvm_exit_mmio *mmio,
                                          phys_addr_t offset)
{
        u32 *reg;

        if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                   vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 *reg;

        if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                  vcpu->vcpu_id, offset >> 1);

        return vgic_handle_cfg_reg(reg, mmio, offset);
}

/*
 * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
 * when we store the target MPIDR written by the guest.
 */
static u32 compress_mpidr(unsigned long mpidr)
{
        u32 ret;

        ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;

        return ret;
}

static unsigned long uncompress_mpidr(u32 value)
{
        unsigned long mpidr;

        mpidr = ((value >> 0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((value >> 8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
        mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);

        return mpidr;
}

/*
 * Look up the given MPIDR value to get the vcpu_id (if there is one)
 * and store that in the irq_spi_cpu[] array.
 * This limits the number of VCPUs to 255 for now; extending the data
 * type (or storing kvm_vcpu pointers) would lift the limit.
 * Store the original MPIDR value in an extra array to support read-as-written.
 * Unallocated MPIDRs are translated to a special value and caught
 * before any array accesses.
 */
static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int spi;
        u32 reg;
        int vcpu_id;
        unsigned long *bmap, mpidr;

        /*
         * The upper 32 bits of each 64-bit register are zero,
         * as we don't support Aff3.
         */
        if ((offset & 4)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        /* This region only covers SPIs, so no handling of private IRQs here. */
        spi = offset / 8;

        /* get the stored MPIDR for this IRQ */
        mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
        reg = mpidr;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);

        if (!mmio->is_write)
                return false;

        /*
         * Now clear the currently assigned vCPU from the map, making room
         * for the new one to be written below.
         */
        vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
        if (likely(vcpu)) {
                vcpu_id = vcpu->vcpu_id;
                bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
                __clear_bit(spi, bmap);
        }

        dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
        vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);

        /*
         * The spec says that non-existent MPIDR values should not be
         * forwarded to any existing (v)CPU, but should be able to become
         * pending anyway. We simply keep the irq_spi_target[] array empty, so
         * the interrupt will never be injected.
         * irq_spi_cpu[irq] gets a magic value in this case.
         */
        if (likely(vcpu)) {
                vcpu_id = vcpu->vcpu_id;
                dist->irq_spi_cpu[spi] = vcpu_id;
                bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
                __set_bit(spi, bmap);
        } else {
                dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
        }

        vgic_update_state(kvm);

        return true;
}

/*
 * We should be careful about promising too much when a guest reads
 * this register. Don't claim to be like any hardware implementation,
 * but just report the GIC as version 3 - which is what a Linux guest
 * would check.
 */
static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio,
                               phys_addr_t offset)
{
        u32 reg = 0;

        switch (offset + GICD_IDREGS) {
        case GICD_PIDR2:
                reg = 0x3b;
                break;
        }

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}
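
/*
 * Distributor register map. Each entry describes one register block: its
 * offset from the distributor base, the byte length of the block, how many
 * bits each interrupt occupies within it (0 for registers that are not
 * per-interrupt), and the handler that emulates accesses to it.
 */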
static const struct vgic_io_range vgic_v3_dist_ranges[] = {
        {
                .base = GICD_CTLR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_ctlr,
        },
        {
                .base = GICD_TYPER,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_typer,
        },
        {
                .base = GICD_IIDR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_iidr,
        },
        {
                /* this register is optional; it is RAZ/WI if not implemented */
                .base = GICD_STATUSR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this write-only register is WI when TYPER.MBIS=0 */
                .base = GICD_SETSPI_NSR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this write-only register is WI when TYPER.MBIS=0 */
                .base = GICD_CLRSPI_NSR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_SETSPI_SR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_CLRSPI_SR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_IGROUPR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_rao_wi,
        },
        {
                .base = GICD_ISENABLER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_enable_reg_dist,
        },
        {
                .base = GICD_ICENABLER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_enable_reg_dist,
        },
        {
                .base = GICD_ISPENDR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_pending_reg_dist,
        },
        {
                .base = GICD_ICPENDR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_pending_reg_dist,
        },
        {
                .base = GICD_ISACTIVER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_ICACTIVER,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_IPRIORITYR,
                .len = 0x400,
                .bits_per_irq = 8,
                .handle_mmio = handle_mmio_priority_reg_dist,
        },
        {
                /* ITARGETSRn is RES0 when ARE=1 */
                .base = GICD_ITARGETSR,
                .len = 0x400,
                .bits_per_irq = 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_ICFGR,
                .len = 0x100,
                .bits_per_irq = 2,
                .handle_mmio = handle_mmio_cfg_reg_dist,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_IGRPMODR,
                .len = 0x80,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base = GICD_NSACR,
                .len = 0x100,
                .bits_per_irq = 2,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base = GICD_SGIR,
                .len = 0x04,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base = GICD_CPENDSGIR,
                .len = 0x10,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base = GICD_SPENDSGIR,
                .len = 0x10,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICD_IROUTER + 0x100,
                .len = 0x1ee0,
                .bits_per_irq = 64,
                .handle_mmio = handle_mmio_route_reg,
        },
        {
                .base = GICD_IDREGS,
                .len = 0x30,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_idregs,
        },
        {},
};

static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
                                    struct kvm_exit_mmio *mmio,
                                    phys_addr_t offset)
{
        /* since we don't support LPIs, this register is zero for now */
        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}
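
/*
 * GICR_TYPER: we emulate the processor number in bits [23:8], the "Last"
 * bit (bit 4) for the redistributor belonging to the highest-numbered
 * VCPU, and the affinity value of the associated VCPU in the upper 32 bits.
 */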
static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 reg;
        u64 mpidr;
        struct kvm_vcpu *redist_vcpu = mmio->private;
        int target_vcpu_id = redist_vcpu->vcpu_id;

        /* the upper 32 bits contain the affinity value */
        if ((offset & ~3) == 4) {
                mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
                reg = compress_mpidr(mpidr);

                vgic_reg_access(mmio, &reg, offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = redist_vcpu->vcpu_id << 8;
        if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
                reg |= GICR_TYPER_LAST;
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
                                              struct kvm_exit_mmio *mmio,
                                              phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      redist_vcpu->vcpu_id,
                                      ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
                                                struct kvm_exit_mmio *mmio,
                                                phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      redist_vcpu->vcpu_id,
                                      ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
                                               struct kvm_exit_mmio *mmio,
                                               phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
                                           redist_vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
                                                 struct kvm_exit_mmio *mmio,
                                                 phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
                                             redist_vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
                                            struct kvm_exit_mmio *mmio,
                                            phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;
        u32 *reg;

        reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                   redist_vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                       redist_vcpu->vcpu_id, offset >> 1);

        return vgic_handle_cfg_reg(reg, mmio, offset);
}
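
/*
 * Each redistributor consists of two 64K frames: the first (RD_base) holds
 * the control registers, the second (SGI_base) holds the SGI and PPI
 * related registers. SGI_base() converts an SGI-frame-relative offset into
 * an offset from the start of the redistributor region.
 */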
#define SGI_base(x) ((x) + SZ_64K)

static const struct vgic_io_range vgic_redist_ranges[] = {
        {
                .base = GICR_CTLR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_ctlr_redist,
        },
        {
                .base = GICR_TYPER,
                .len = 0x08,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_typer_redist,
        },
        {
                .base = GICR_IIDR,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_iidr,
        },
        {
                .base = GICR_WAKER,
                .len = 0x04,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GICR_IDREGS,
                .len = 0x30,
                .bits_per_irq = 0,
                .handle_mmio = handle_mmio_idregs,
        },
        {
                .base = SGI_base(GICR_IGROUPR0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_rao_wi,
        },
        {
                .base = SGI_base(GICR_ISENABLER0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_enable_reg_redist,
        },
        {
                .base = SGI_base(GICR_ICENABLER0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_enable_reg_redist,
        },
        {
                .base = SGI_base(GICR_ISPENDR0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_set_pending_reg_redist,
        },
        {
                .base = SGI_base(GICR_ICPENDR0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_clear_pending_reg_redist,
        },
        {
                .base = SGI_base(GICR_ISACTIVER0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = SGI_base(GICR_ICACTIVER0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = SGI_base(GICR_IPRIORITYR0),
                .len = 0x20,
                .bits_per_irq = 8,
                .handle_mmio = handle_mmio_priority_reg_redist,
        },
        {
                .base = SGI_base(GICR_ICFGR0),
                .len = 0x08,
                .bits_per_irq = 2,
                .handle_mmio = handle_mmio_cfg_reg_redist,
        },
        {
                .base = SGI_base(GICR_IGRPMODR0),
                .len = 0x04,
                .bits_per_irq = 1,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = SGI_base(GICR_NSACR),
                .len = 0x04,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {},
};
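
/*
 * Try to queue an SGI to the given VCPU; if the generic queueing code
 * accepts it, clear the distributor's pending state for it.
 */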
static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
        if (vgic_queue_irq(vcpu, 0, irq)) {
                vgic_dist_irq_clear_pending(vcpu, irq);
                vgic_cpu_irq_clear(vcpu, irq);
                return true;
        }

        return false;
}
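
/*
 * Register the MMIO regions with the KVM I/O bus: one device for the
 * distributor and one per VCPU covering its pair of redistributor frames.
 * This requires the distributor and redistributor base addresses to have
 * been set and the VGIC to have been initialized by userspace already.
 */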
static int vgic_v3_map_resources(struct kvm *kvm,
                                 const struct vgic_params *params)
{
        int ret = 0;
        struct vgic_dist *dist = &kvm->arch.vgic;
        gpa_t rdbase = dist->vgic_redist_base;
        struct vgic_io_device *iodevs = NULL;
        int i;

        if (!irqchip_in_kernel(kvm))
                return 0;

        mutex_lock(&kvm->lock);

        if (vgic_ready(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
                kvm_err("Need to set vgic distributor addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        /*
         * For a GICv3 we require userspace to explicitly initialize
         * the VGIC before we can use it.
         */
        if (!vgic_initialized(kvm)) {
                ret = -EBUSY;
                goto out;
        }

        ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
                                       GIC_V3_DIST_SIZE, vgic_v3_dist_ranges,
                                       -1, &dist->dist_iodev);
        if (ret)
                goto out;

        iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL);
        if (!iodevs) {
                ret = -ENOMEM;
                goto out_unregister;
        }

        for (i = 0; i < dist->nr_cpus; i++) {
                ret = vgic_register_kvm_io_dev(kvm, rdbase,
                                               SZ_128K, vgic_redist_ranges,
                                               i, &iodevs[i]);
                if (ret)
                        goto out_unregister;
                rdbase += GIC_V3_REDIST_SIZE;
        }

        dist->redist_iodevs = iodevs;
        dist->ready = true;
        goto out;

out_unregister:
        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
        if (iodevs) {
                for (i = 0; i < dist->nr_cpus; i++) {
                        if (iodevs[i].dev.ops)
                                kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
                                                          &iodevs[i].dev);
                }
        }

out:
        if (ret)
                kvm_vgic_destroy(kvm);
        mutex_unlock(&kvm->lock);
        return ret;
}
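
/*
 * Allocate the per-SPI MPIDR storage and target every SPI at VCPU 0 by
 * default, until the guest reprograms the routing registers.
 */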
static int vgic_v3_init_model(struct kvm *kvm)
{
        int i;
        u32 mpidr;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;

        dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
                                      GFP_KERNEL);
        if (!dist->irq_spi_mpidr)
                return -ENOMEM;

        /* Initialize the target VCPUs for each IRQ to VCPU 0 */
        mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
        for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
                dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
                dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
                vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
        }

        return 0;
}

/* GICv3 does not keep track of SGI sources anymore. */
static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
}

void vgic_v3_init_emulation(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
        dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
        dist->vm_ops.init_model = vgic_v3_init_model;
        dist->vm_ops.map_resources = vgic_v3_map_resources;

        kvm->arch.max_vcpus = KVM_MAX_VCPUS;
}

/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
        unsigned long affinity;
        int level0;

        /*
         * Split the current VCPU's MPIDR into affinity level 0 and the
         * rest as this is what we have to compare against.
         */
        affinity = kvm_vcpu_get_mpidr_aff(vcpu);
        level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
        affinity &= ~MPIDR_LEVEL_MASK;

        /* bail out if the upper three levels don't match */
        if (sgi_aff != affinity)
                return -1;

        /* Is this VCPU's bit set in the mask? */
        if (!(sgi_cpu_mask & BIT(level0)))
                return -1;

        return level0;
}
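
/*
 * SGI_AFFINITY_LEVEL() extracts the affinity level <level> field from an
 * ICC_SGI1R_EL1 value and shifts it into the position that level occupies
 * in an MPIDR, so the result can be compared against VCPU MPIDRs directly.
 */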
#define SGI_AFFINITY_LEVEL(reg, level) \
        ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
        >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting an SGI
 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * The ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all VCPUs except
 * the calling one.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *c_vcpu;
        struct vgic_dist *dist = &kvm->arch.vgic;
        u16 target_cpus;
        u64 mpidr;
        int sgi, c;
        int vcpu_id = vcpu->vcpu_id;
        bool broadcast;
        int updated = 0;

        sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
        broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
        target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
        mpidr = SGI_AFFINITY_LEVEL(reg, 3);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

        /*
         * We take the dist lock here, because we come from the sysregs
         * code path and not from the MMIO one (which already takes the lock).
         */
        spin_lock(&dist->lock);

        /*
         * We iterate over all VCPUs to find the MPIDRs matching the request.
         * If we have handled one CPU, we clear its bit to detect early
         * if we are already finished. This avoids iterating through all
         * VCPUs when most of the time we just signal a single VCPU.
         */
        kvm_for_each_vcpu(c, c_vcpu, kvm) {
                /* Exit early if we have dealt with all requested CPUs */
                if (!broadcast && target_cpus == 0)
                        break;

                /* Don't signal the calling VCPU */
                if (broadcast && c == vcpu_id)
                        continue;

                if (!broadcast) {
                        int level0;

                        level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
                        if (level0 == -1)
                                continue;

                        /* remove this matching VCPU from the mask */
                        target_cpus &= ~BIT(level0);
                }

                /* Flag the SGI as pending */
                vgic_dist_irq_set_pending(c_vcpu, sgi);
                updated = 1;
                kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
        }
        if (updated)
                vgic_update_state(vcpu->kvm);
        spin_unlock(&dist->lock);
        if (updated)
                vgic_kick_vcpus(vcpu->kvm);
}
static int vgic_v3_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v3_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        }

        return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        }

        return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return -ENXIO;
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }

        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_v3_create,
        .destroy = vgic_v3_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};