vgic-v2-emul.c

/*
 * Contains GICv2 specific emulation code, was in vgic.c before.
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

#define GICC_ARCH_VERSION_V2	0x2

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);

static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}

static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
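/*
 * Worked example of the GICD_TYPER encoding above: with 4 online VCPUs
 * and 256 configured IRQs, the guest reads back
 * ((4 - 1) << 5) | ((256 / 32) - 1) = 0x67, i.e. CPUNumber = 3 (meaning
 * four CPU interfaces) and ITLinesNumber = 7 (meaning 8 * 32 = 256
 * interrupt lines).
 */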
static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);

	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)

static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;

		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}
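/*
 * Example of the GICD_ITARGETSR encoding handled by the two helpers
 * above: each 32-bit register covers four SPIs, one byte per interrupt,
 * with one bit per candidate CPU interface.  If SPIs 32..35 are routed
 * to CPUs 0, 1, 0 and 2 respectively, vgic_get_target_reg() returns
 * 0x04010201.  On a write, only the lowest set bit of each byte is
 * honoured, so writing 0x06 in a byte (CPUs 1 and 2) routes that SPI to
 * CPU1 only, and a zero byte falls back to CPU0.
 */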
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupt targets as read-only */
	if (offset < 32) {
		u32 roreg;

		roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);

		reg |= ((u32)sources) << (8 * (sgi - min_sgi));
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear pending SGI sources on the distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

		if (set) {
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}
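/*
 * Layout note for the two helpers above: GICD_SPENDSGIRn/GICD_CPENDSGIRn
 * pack four SGIs per 32-bit register, one byte per SGI, and each bit in a
 * byte identifies the CPU that sourced the SGI.  For example, a read that
 * returns 0x00000500 for SGIs 0..3 means SGI1 is pending on this VCPU
 * with CPUs 0 and 2 as sources; writing the same value to the CPENDSGIR
 * alias clears exactly those two source bits.
 */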
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}

static const struct kvm_mmio_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_MAX_IRQS / 4,
		.bits_per_irq	= 2,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};
static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_exit_mmio *mmio)
{
	unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;

	if (!is_in_range(mmio->phys_addr, mmio->len, base,
			 KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* GICv2 does not support accesses wider than 32 bits */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
}

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n",
				  sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
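/*
 * GICD_SGIR decoding example for vgic_dispatch_sgi(): bits [3:0] carry
 * the SGI number, bits [23:16] the CPU target list and bits [25:24] the
 * target list filter.  Writing 0x00090002 therefore sends SGI2 to CPUs 0
 * and 3 (filter 0, explicit list 0b1001), while 0x01000005 (filter 1)
 * sends SGI5 to every online CPU except the sender.
 */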
static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, dist->nr_cpus) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}
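/*
 * Note on SGI queueing: each set bit in the sources byte corresponds to
 * one originating CPU and needs its own list register entry, since the
 * GIC reports the source CPU ID alongside an SGI.  If, say, CPUs 0 and 2
 * both sent SGI1 (sources == 0x05) but only one list register is free,
 * one source is queued now, the bit for the other stays set, and the SGI
 * remains pending until a later exit queues the remaining source.
 */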
/**
 * vgic_v2_map_resources - Configure global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 * @params: the vgic hardware parameters; provides the host GICV base
 *	    address (vcpu_base) mapped into the guest below
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.  We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.
 */
static int vgic_v2_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	int ret = 0;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to allocate maps\n");
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
				    true);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	kvm->arch.vgic.ready = true;
out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}
static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	*vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
}

static int vgic_v2_init_model(struct kvm *kvm)
{
	int i;

	for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	return 0;
}

void vgic_v2_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
	dist->vm_ops.init_model = vgic_v2_init_model;
	dist->vm_ops.map_resources = vgic_v2_map_resources;

	kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
}
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
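/*
 * GICC_IIDR layout as emulated above: ProductID in bits [31:20],
 * Architecture version in bits [19:16] (0x2 for GICv2) and the
 * Implementer code in bits [11:0].  Note the field positions differ
 * from GICD_IIDR, where handle_mmio_misc() places the product ID at
 * bit 24.
 */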
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct kvm_mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
};
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct kvm_mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = vgic_find_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field.  If no other VCPUs are running we can safely access the
	 * VGIC state, because even if another VCPU is run after this point,
	 * that VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
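/*
 * The attr->attr value decoded above packs two things into one field:
 * the target VCPU index in the KVM_DEV_ARM_VGIC_CPUID_MASK bits and the
 * register offset within the distributor or CPU interface frame in the
 * KVM_DEV_ARM_VGIC_OFFSET_MASK bits.  For example, an attr of
 * (1 << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x8 in the CPU_REGS group
 * addresses GICC_BPR (offset 0x8) as seen by VCPU1.
 */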
static int vgic_v2_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v2_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}
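/*
 * A minimal sketch of how user space might read a distributor register
 * through vgic_v2_get_attr(), assuming "vgic_fd" was obtained earlier via
 * KVM_CREATE_DEVICE with KVM_DEV_TYPE_ARM_VGIC_V2 (user-space pseudo-code,
 * shown for illustration only):
 *
 *	u32 val;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
 *		.attr  = 0x0,			 // GICD_CTLR, as seen by VCPU0
 *		.addr  = (u64)(unsigned long)&val,
 *	};
 *
 *	if (ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr))
 *		perror("KVM_GET_DEVICE_ATTR");
 */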
static int vgic_v2_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}

	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_v2_create,
	.destroy = vgic_v2_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};