vgic-mmio-v2.c

/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>

#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 1: Report GICv2 interrupts as group 0 instead of group 1
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *             their configured groups.
 */

static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
        u32 value;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                value = vgic->enabled ? GICD_ENABLE : 0;
                break;
        case GIC_DIST_CTR:
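                /*
                 * GICD_TYPER: ITLinesNumber in bits [4:0] encodes the
                 * number of implemented interrupt lines as 32 * (N + 1),
                 * and CPUNumber in bits [7:5] holds the number of CPU
                 * interfaces minus one. For example, 128 SPIs and 4 vCPUs
                 * yield ((128 + 32) / 32 - 1) | ((4 - 1) << 5) = 0x64.
                 */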
                value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
                value = (value >> 5) - 1;
                value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                break;
        case GIC_DIST_IIDR:
                value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
                        (vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
                        (IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
                break;
        default:
                return 0;
        }

        return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        bool was_enabled = dist->enabled;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                dist->enabled = val & GICD_ENABLE;
                if (!was_enabled && dist->enabled)
                        vgic_kick_vcpus(vcpu->kvm);
                break;
        case GIC_DIST_CTR:
        case GIC_DIST_IIDR:
                /* Nothing to do */
                return;
        }
}

static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len,
                                           unsigned long val)
{
        switch (addr & 0x0c) {
        case GIC_DIST_IIDR:
                if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
                        return -EINVAL;

                /*
                 * If we observe a write to GICD_IIDR we know that userspace
                 * has been updated and has had a chance to cope with older
                 * kernels (VGICv2 IIDR.Revision == 0) incorrectly reporting
                 * interrupts as group 1, and therefore we now allow groups to
                 * be user writable.  Doing this by default would break
                 * migration from old kernels to new kernels with legacy
                 * userspace.
                 */
                vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
                return 0;
        }

        vgic_mmio_write_v2_misc(vcpu, addr, len, val);
        return 0;
}
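
/*
 * Group configuration only becomes writable from userspace once it has
 * signalled, by writing GICD_IIDR back, that it understands the
 * Revision >= 1 grouping semantics described above; until then the
 * write is silently ignored.
 */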
static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len,
                                            unsigned long val)
{
        if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
                vgic_mmio_write_group(vcpu, addr, len, val);

        return 0;
}
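
/*
 * GICD_SGIR layout: SGIINTID lives in bits [3:0], CPUTargetList in bits
 * [23:16] and TargetListFilter in bits [25:24]. For example, a guest
 * write of 0x020005 (filter 0) sends SGI 5 to vCPU 1 only.
 */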
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
                                 gpa_t addr, unsigned int len,
                                 unsigned long val)
{
        int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
        int intid = val & 0xf;
        int targets = (val >> 16) & 0xff;
        int mode = (val >> 24) & 0x03;
        int c;
        struct kvm_vcpu *vcpu;
        unsigned long flags;

        switch (mode) {
        case 0x0:               /* as specified by targets */
                break;
        case 0x1:
                targets = (1U << nr_vcpus) - 1;                 /* all, ... */
                targets &= ~(1U << source_vcpu->vcpu_id);       /* but self */
                break;
        case 0x2:               /* this very vCPU only */
                targets = (1U << source_vcpu->vcpu_id);
                break;
        case 0x3:               /* reserved */
                return;
        }

        kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
                struct vgic_irq *irq;

                if (!(targets & (1U << c)))
                        continue;

                irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
                irq->source |= 1U << source_vcpu->vcpu_id;

                vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
                vgic_put_irq(source_vcpu->kvm, irq);
        }
}

static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->targets << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}
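
/*
 * GICD_ITARGETSR holds one byte per interrupt, each byte being a CPU
 * target mask. The emulation routes an SPI to the lowest-numbered vCPU
 * that has its bit set in the written mask, after masking off bits for
 * vCPUs that don't exist.
 */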
static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
        int i;
        unsigned long flags;

        /* GICD_ITARGETSR[0-7] are read-only */
        if (intid < VGIC_NR_PRIVATE_IRQS)
                return;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
                int target;

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->targets = (val >> (i * 8)) & cpu_mask;
                target = irq->targets ? __ffs(irq->targets) : 0;
                irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 intid = addr & 0x0f;
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->source << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}
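
/*
 * GICD_CPENDSGIR/GICD_SPENDSGIR expose one byte per SGI, with one bit
 * per possible source CPU. Clearing bits removes the matching sources;
 * the SGI only stops being pending once no source bits remain set.
 */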
static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source &= ~((val >> (i * 8)) & 0xff);
                if (!irq->source)
                        irq->pending_latch = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source |= (val >> (i * 8)) & 0xff;

                if (irq->source) {
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
                        spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
                vgic_put_irq(vcpu->kvm, irq);
        }
}

#define GICC_ARCH_VERSION_V2    0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        struct vgic_vmcr vmcr;
        u32 val;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
                val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
                val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
                val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
                val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
                val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * priority mask to userspace using the lower bits in the
                 * unsigned long.
                 */
                val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
                        GICV_PMR_PRIORITY_SHIFT;
                break;
        case GIC_CPU_BINPOINT:
                val = vmcr.bpr;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                val = vmcr.abpr;
                break;
        case GIC_CPU_IDENT:
                val = ((PRODUCT_ID_KVM << 20) |
                       (GICC_ARCH_VERSION_V2 << 16) |
                       IMPLEMENTER_ARM);
                break;
        default:
                return 0;
        }

        return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
                vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
                vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
                vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
                vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
                vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * priority mask to userspace using the lower bits in the
                 * unsigned long.
                 */
                vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
                        GICV_PMR_PRIORITY_MASK;
                break;
        case GIC_CPU_BINPOINT:
                vmcr.bpr = val;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                vmcr.abpr = val;
                break;
        }

        vgic_set_vmcr(vcpu, &vmcr);
}
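
/*
 * The active priorities registers track the priorities of interrupts
 * that have been acknowledged but not yet EOIed. GICv2 hardware only
 * backs APR0 (vgic_v2.vgic_apr); on GICv3 hardware the memory-mapped
 * guest's active priorities live in ICH_AP1R<n> instead.
 */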
static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
                                        gpa_t addr, unsigned int len)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return 0;
                return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return 0;

                n = array_index_nospec(n, 4);

                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                return vgicv3->vgic_ap1r[n];
        }
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
                                gpa_t addr, unsigned int len,
                                unsigned long val)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return;
                vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return;

                n = array_index_nospec(n, 4);

                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                vgicv3->vgic_ap1r[n] = val;
        }
}
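
/*
 * The distributor register map. The "bits per IRQ" argument of
 * REGISTER_DESC_WITH_BITS_PER_IRQ gives the width of each interrupt's
 * field within the register block: 1 for the group/enable/pending/active
 * bits, 8 for the priority and target bytes, 2 for the config field.
 */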
static const struct vgic_register_region vgic_v2_dist_registers[] = {
        REGISTER_DESC_WITH_LENGTH_UACCESS(GIC_DIST_CTRL,
                vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc,
                NULL, vgic_mmio_uaccess_write_v2_misc,
                12, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
                vgic_mmio_read_group, vgic_mmio_write_group,
                NULL, vgic_mmio_uaccess_write_v2_group, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
                vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
                vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
                vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
                NULL, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
                NULL, vgic_mmio_uaccess_write_cactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
                8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
                vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
                vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
                vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
                vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
};
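
/* The GICv2 distributor occupies a single 4K region of guest address space. */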
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
        dev->regions = vgic_v2_dist_registers;
        dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

        kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

        return SZ_4K;
}
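
/*
 * Check that a userspace-supplied device attribute actually names an
 * implemented distributor or CPU interface register before the access
 * is carried out.
 */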
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        struct vgic_io_device iodev;
        struct vgic_reg_attr reg_attr;
        struct kvm_vcpu *vcpu;
        gpa_t addr;
        int ret;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                iodev.regions = vgic_v2_dist_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
                iodev.base_addr = 0;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                iodev.regions = vgic_v2_cpu_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
                iodev.base_addr = 0;
                break;
        default:
                return -ENXIO;
        }

        /* We only support aligned 32-bit accesses. */
        if (addr & 3)
                return -ENXIO;

        region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
        if (!region)
                return -ENXIO;

        return 0;
}
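
/*
 * Userspace reads and writes of VGIC state go through temporary io
 * devices built on the same register tables as the guest-facing MMIO
 * emulation, so both paths share a single set of handlers.
 */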
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_cpu_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
                .iodev_type = IODEV_CPUIF,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_dist_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
                .iodev_type = IODEV_DIST,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}