vgic-mmio-v2.c

/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"
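
/*
 * Reads of the "miscellaneous" registers at the top of the distributor
 * map. GICD_CTLR reflects the global distributor enable bit, GICD_TYPER
 * encodes the number of interrupt lines as 32 * (N + 1) in bits [4:0]
 * plus the number of vCPUs minus one in bits [7:5], and GICD_IIDR
 * identifies KVM as the product and ARM as the implementer.
 */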
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 value;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
                break;
        case GIC_DIST_CTR:
                value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
                value = (value >> 5) - 1;
                value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                break;
        case GIC_DIST_IIDR:
                value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
                break;
        default:
                return 0;
        }

        return value;
}
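
/*
 * Only GICD_CTLR is writable in this group; GICD_TYPER and GICD_IIDR
 * are read-only, so writes to them are ignored. Enabling the distributor
 * kicks all vCPUs so that interrupts which became pending while it was
 * disabled get delivered.
 */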
static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        bool was_enabled = dist->enabled;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                dist->enabled = val & GICD_ENABLE;
                if (!was_enabled && dist->enabled)
                        vgic_kick_vcpus(vcpu->kvm);
                break;
        case GIC_DIST_CTR:
        case GIC_DIST_IIDR:
                /* Nothing to do */
                return;
        }
}
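
/*
 * GICD_SGIR layout: bits [3:0] hold the SGI number, bits [23:16] the CPU
 * target list and bits [25:24] the TargetListFilter. Filter 0b00 targets
 * the listed CPUs, 0b01 all CPUs but the requesting one, 0b10 only the
 * requesting CPU; 0b11 is reserved. The requester is recorded in
 * irq->source so the per-source pending state stays visible via
 * GICD_{C,S}PENDSGIR.
 */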
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
                                 gpa_t addr, unsigned int len,
                                 unsigned long val)
{
        int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
        int intid = val & 0xf;
        int targets = (val >> 16) & 0xff;
        int mode = (val >> 24) & 0x03;
        int c;
        struct kvm_vcpu *vcpu;
        unsigned long flags;

        switch (mode) {
        case 0x0:               /* as specified by targets */
                break;
        case 0x1:
                targets = (1U << nr_vcpus) - 1;                 /* all, ... */
                targets &= ~(1U << source_vcpu->vcpu_id);       /* but self */
                break;
        case 0x2:               /* this very vCPU only */
                targets = (1U << source_vcpu->vcpu_id);
                break;
        case 0x3:               /* reserved */
                return;
        }

        kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
                struct vgic_irq *irq;

                if (!(targets & (1U << c)))
                        continue;

                irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
                irq->source |= 1U << source_vcpu->vcpu_id;

                vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
                vgic_put_irq(source_vcpu->kvm, irq);
        }
}
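
/*
 * GICD_ITARGETSRn hold one byte per interrupt, each bit naming a target
 * CPU interface. The registers covering SGIs and PPIs are banked and
 * read-only, so only SPI target bytes can be rewritten; the first set
 * bit of the new mask selects the vCPU that will receive the interrupt.
 */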
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->targets << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
        int i;
        unsigned long flags;

        /* GICD_ITARGETSR[0-7] are read-only */
        if (intid < VGIC_NR_PRIVATE_IRQS)
                return;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
                int target;

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->targets = (val >> (i * 8)) & cpu_mask;
                target = irq->targets ? __ffs(irq->targets) : 0;
                irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}
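
/*
 * GICD_CPENDSGIR/GICD_SPENDSGIR expose one byte per SGI with one pending
 * bit per possible source CPU. Setting bits adds pending sources and
 * queues the SGI; clearing bits removes sources, and the pending latch
 * is dropped once no source is left.
 */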
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 intid = addr & 0x0f;
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->source << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source &= ~((val >> (i * 8)) & 0xff);
                if (!irq->source)
                        irq->pending_latch = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source |= (val >> (i * 8)) & 0xff;

                if (irq->source) {
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
                        spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
                vgic_put_irq(vcpu->kvm, irq);
        }
}

#define GICC_ARCH_VERSION_V2    0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        struct vgic_vmcr vmcr;
        u32 val;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
                val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
                val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
                val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
                val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
                val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * the priority mask to userspace using the lower bits in
                 * the unsigned long.
                 */
                val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
                        GICV_PMR_PRIORITY_SHIFT;
                break;
        case GIC_CPU_BINPOINT:
                val = vmcr.bpr;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                val = vmcr.abpr;
                break;
        case GIC_CPU_IDENT:
                val = ((PRODUCT_ID_KVM << 20) |
                       (GICC_ARCH_VERSION_V2 << 16) |
                       IMPLEMENTER_ARM);
                break;
        default:
                return 0;
        }

        return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
                vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
                vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
                vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
                vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
                vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * the priority mask to userspace using the lower bits in
                 * the unsigned long.
                 */
                vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
                        GICV_PMR_PRIORITY_MASK;
                break;
        case GIC_CPU_BINPOINT:
                vmcr.bpr = val;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                vmcr.abpr = val;
                break;
        }

        vgic_set_vmcr(vcpu, &vmcr);
}
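
/*
 * Active priority registers. A GICv2 CPU interface tracks at most 32
 * priority groups, so only APR0 carries state; on a GICv3 host running
 * a GICv2 guest the equivalent state lives in the ICH_AP1Rn registers,
 * of which only the implemented ones may be touched.
 */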
static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
                                        gpa_t addr, unsigned int len)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return 0;
                return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return 0;
                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                return vgicv3->vgic_ap1r[n];
        }
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
                                gpa_t addr, unsigned int len,
                                unsigned long val)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return;
                vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return;
                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                vgicv3->vgic_ap1r[n] = val;
        }
}
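
/*
 * Distributor register map. The REGISTER_DESC_WITH_* helpers (defined in
 * vgic-mmio.h) describe a region either by its byte length or by the
 * number of state bits per interrupt, from which the MMIO offset of a
 * given interrupt's state is derived.
 */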
static const struct vgic_register_region vgic_v2_dist_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
                vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
                vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
                vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
                vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
                vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
                NULL, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
                NULL, vgic_mmio_uaccess_write_cactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
                8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
                vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
                vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
                vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
                vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
};
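
/*
 * Hooks the distributor register map up to a vgic_io_device and returns
 * the size of the GICv2 distributor window (4 KB) for the caller to
 * register at the guest physical base address.
 */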
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
        dev->regions = vgic_v2_dist_registers;
        dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

        kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

        return SZ_4K;
}
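
/*
 * Checks whether the register addressed via a
 * KVM_DEV_ARM_VGIC_GRP_DIST_REGS or KVM_DEV_ARM_VGIC_GRP_CPU_REGS device
 * attribute exists in the corresponding register map. Only aligned
 * 32-bit accesses are supported.
 */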
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        struct vgic_io_device iodev;
        struct vgic_reg_attr reg_attr;
        struct kvm_vcpu *vcpu;
        gpa_t addr;
        int ret;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                iodev.regions = vgic_v2_dist_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
                iodev.base_addr = 0;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                iodev.regions = vgic_v2_cpu_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
                iodev.base_addr = 0;
                break;
        default:
                return -ENXIO;
        }

        /* We only support aligned 32-bit accesses. */
        if (addr & 3)
                return -ENXIO;

        region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
        if (!region)
                return -ENXIO;

        return 0;
}
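
/*
 * Userspace access helpers: route a 32-bit read or write at the given
 * offset through the CPU interface or distributor register map, reusing
 * the same handlers as guest MMIO emulation.
 */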
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_cpu_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
                .iodev_type = IODEV_CPUIF,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_dist_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
                .iodev_type = IODEV_DIST,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}