/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
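
/* Write @val into list register @lr of the GICH MMIO frame. */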
static inline void vgic_v2_write_lr(int lr, u32 val)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;

	writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}
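
/* Clear all list registers so we start from a known state. */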
void vgic_v2_init_lrs(void)
{
	int i;

	for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_v2_write_lr(i, 0);
}
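
/*
 * Enable the underflow maintenance interrupt, so that we get notified
 * when the list registers run (almost) empty and can refill them from
 * the ap_list.
 */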
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_UIE;
}
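
/*
 * An LR signals an EOI maintenance interrupt when it is invalid (no
 * state bits set), has the EOI bit set, and is not a hardware-mapped
 * interrupt.
 */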
static bool lr_signals_eoi_mi(u32 lr_val)
{
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}

/*
 * Transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line-level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;

		/* Extract the source vCPU id from the LR */
		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
		cpuid &= 7;

		/* Notify fds when the guest EOI'ed a level-triggered SPI */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

		spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		if (irq->active && vgic_irq_is_sgi(intid))
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid))
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
			irq->pending_latch = false;

		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * If this causes us to lower the level, we have to also clear
		 * the physical active state, since we will otherwise never be
		 * told when the interrupt becomes asserted again.
		 */
		if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT)) {
			irq->line_level = vgic_get_phys_line_level(irq);

			if (!irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_cpu->used_lrs = 0;
}

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;
	bool allow_pending = true;

	if (irq->active) {
		val |= GICH_LR_ACTIVE_BIT;
		if (vgic_irq_is_sgi(irq->intid))
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= GICH_LR_EOI;
		}
	}

	if (irq->group)
		val |= GICH_LR_GROUP1;

	if (irq->hw) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= GICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= GICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));

			if (irq->source) {
				irq->pending_latch = true;
				val |= GICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v2_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
		irq->line_level = false;

	/* The GICv2 LR only holds five bits of priority. */
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}
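
/* Reset the shadow copy of list register @lr for this VCPU. */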
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}
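
/*
 * Pack the generic struct vgic_vmcr fields into the GICv2 GICH_VMCR
 * register layout and store the result in the shadow CPU interface.
 */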
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
		GICH_VMCR_ENABLE_GRP0_MASK;
	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
		GICH_VMCR_ENABLE_GRP1_MASK;
	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
		GICH_VMCR_ACK_CTL_MASK;
	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
		GICH_VMCR_FIQ_EN_MASK;
	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
		GICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
		GICH_VMCR_EOI_MODE_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
		 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	cpu_if->vgic_vmcr = vmcr;
}
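
/*
 * Unpack the shadow GICH_VMCR value back into the generic
 * struct vgic_vmcr representation.
 */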
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
		GICH_VMCR_ENABLE_GRP0_SHIFT;
	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
		GICH_VMCR_ENABLE_GRP1_SHIFT;
	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
		GICH_VMCR_ACK_CTL_SHIFT;
	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
		GICH_VMCR_FIQ_EN_SHIFT;
	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
		GICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
		GICH_VMCR_EOI_MODE_SHIFT;
	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
		GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
		GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
		GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}

void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}
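
/*
 * Map the guest-facing GICv2 regions: register the distributor MMIO
 * device at the base address set by userspace and, unless the CPU
 * interface has to be trapped, map the hardware GICV frame into the
 * guest at the CPU interface base address.
 */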
int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_err("VGIC CPU and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
	if (ret) {
		kvm_err("Unable to register VGIC MMIO regions\n");
		goto out;
	}

	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			goto out;
		}
	}

	dist->ready = true;

out:
	return ret;
}

DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
 * @info: pointer to the GIC description gathered from the firmware tables
 *
 * Returns 0 if a GICv2 has been found, returns an error code otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;

	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");

		ret = create_hyp_io_mappings(info->vcpu.start,
					     resource_size(&info->vcpu),
					     &kvm_vgic_global_state.vcpu_base_va,
					     &kvm_vgic_global_state.vcpu_hyp_va);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}

	ret = create_hyp_io_mappings(info->vctrl.start,
				     resource_size(&info->vctrl),
				     &kvm_vgic_global_state.vctrl_base,
				     &kvm_vgic_global_state.vctrl_hyp);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}

	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

	return 0;
out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);

	return ret;
}
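
/*
 * Read the list registers back on guest exit. LRs that the hardware
 * reports as empty in GICH_ELRSR only need their state bits cleared in
 * the shadow copy; the others are read back from the hardware. All
 * used LRs are then zeroed for the next guest entry.
 */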
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 elrsr;
	int i;

	elrsr = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(used_lrs > 32))
		elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;

	for (i = 0; i < used_lrs; i++) {
		if (elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}
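
/*
 * On guest exit: if any list registers were in use, save them and
 * disable the hypervisor control interface.
 */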
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	if (!base)
		return;

	if (used_lrs) {
		save_lrs(vcpu, base);
		writel_relaxed(0, base + GICH_HCR);
	}
}
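
/*
 * On guest entry: restore the hypervisor control register and the
 * shadow list registers into the hardware.
 */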
void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	if (!base)
		return;

	if (used_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		for (i = 0; i < used_lrs; i++) {
			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}
}
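
/* Restore the VMCR and active priority registers on vcpu_load. */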
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	writel_relaxed(cpu_if->vgic_vmcr,
		       kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	writel_relaxed(cpu_if->vgic_apr,
		       kvm_vgic_global_state.vctrl_base + GICH_APR);
}
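
/* Save the VMCR and active priority registers on vcpu_put. */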
void vgic_v2_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}