vgic.c

/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending
 * - VGIC pending interrupts are stored on the vgic.irq_state vgic
 *   bitmap (this bitmap is updated by both user land ioctls and guest
 *   mmio ops, and other in-kernel peripherals such as the
 *   arch. timers) and indicate the 'wire' state.
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_state & dist->irq_enable
 *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_active is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_active. This allows the
 *   interrupt line to be sampled again.
 */

#define VGIC_ADDR_UNDEF         (-1)
#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM          0x4b    /* ASCII code K */
#define IMPLEMENTER_ARM         0x43b
#define GICC_ARCH_VERSION_V2    0x2

/* Physical address of vgic virtual cpu interface */
static phys_addr_t vgic_vcpu_base;

/* Virtual control interface base address */
static void __iomem *vgic_vctrl_base;

static struct device_node *vgic_node;

#define ACCESS_READ_VALUE       (1 << 0)
#define ACCESS_READ_RAZ         (0 << 0)
#define ACCESS_READ_MASK(x)     ((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED    (0 << 1)
#define ACCESS_WRITE_SETBIT     (1 << 1)
#define ACCESS_WRITE_CLEARBIT   (2 << 1)
#define ACCESS_WRITE_VALUE      (3 << 1)
#define ACCESS_WRITE_MASK(x)    ((x) & (3 << 1))

static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static u32 vgic_nr_lr;

static unsigned int vgic_maint_irq;

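/*
 * A vgic_bitmap holds one bit of state per interrupt: the first
 * 32-bit word is banked per cpu (SGIs and PPIs), the remaining words
 * are shared by all vcpus (SPIs). Offsets are in bytes, as seen by
 * the guest on the distributor MMIO interface.
 */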
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
                                int cpuid, u32 offset)
{
        offset >>= 2;
        if (!offset)
                return x->percpu[cpuid].reg;
        else
                return x->shared.reg + offset - 1;
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
                                   int cpuid, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                return test_bit(irq, x->percpu[cpuid].reg_ul);

        return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
                                    int irq, int val)
{
        unsigned long *reg;

        if (irq < VGIC_NR_PRIVATE_IRQS) {
                reg = x->percpu[cpuid].reg_ul;
        } else {
                reg = x->shared.reg_ul;
                irq -= VGIC_NR_PRIVATE_IRQS;
        }

        if (val)
                set_bit(irq, reg);
        else
                clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
        if (unlikely(cpuid >= VGIC_MAX_CPUS))
                return NULL;
        return x->percpu[cpuid].reg_ul;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
        return x->shared.reg_ul;
}

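/*
 * A vgic_bytemap stores one byte of state per interrupt (used for the
 * priority registers): the first eight words cover the 32 private
 * interrupts and are banked per cpu, the rest is shared.
 */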
static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
        offset >>= 2;
        BUG_ON(offset > (VGIC_NR_IRQS / 4));
        if (offset < 8)
                return x->percpu[cpuid] + offset;
        else
                return x->shared + offset - 8;
}

#define VGIC_CFG_LEVEL  0
#define VGIC_CFG_EDGE   1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int irq_val;

        irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
        return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
        else
                set_bit(irq - VGIC_NR_PRIVATE_IRQS,
                        vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
        else
                clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
                          vcpu->arch.vgic_cpu.pending_shared);
}

static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
        return *((u32 *)mmio->data) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
        *((u32 *)mmio->data) = value & mask;
}

/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
                            phys_addr_t offset, int mode)
{
        int word_offset = (offset & 3) * 8;
        u32 mask = (1UL << (mmio->len * 8)) - 1;
        u32 regval;

        /*
         * Any alignment fault should have been delivered to the guest
         * directly (ARM ARM B3.12.7 "Prioritization of aborts").
         */

        if (reg) {
                regval = *reg;
        } else {
                BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
                regval = 0;
        }

        if (mmio->is_write) {
                u32 data = mmio_data_read(mmio, mask) << word_offset;
                switch (ACCESS_WRITE_MASK(mode)) {
                case ACCESS_WRITE_IGNORED:
                        return;

                case ACCESS_WRITE_SETBIT:
                        regval |= data;
                        break;

                case ACCESS_WRITE_CLEARBIT:
                        regval &= ~data;
                        break;

                case ACCESS_WRITE_VALUE:
                        regval = (regval & ~(mask << word_offset)) | data;
                        break;
                }
                *reg = regval;
        } else {
                switch (ACCESS_READ_MASK(mode)) {
                case ACCESS_READ_RAZ:
                        regval = 0;
                        /* fall through */

                case ACCESS_READ_VALUE:
                        mmio_data_write(mmio, mask, regval >> word_offset);
                }
        }
}

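/*
 * Emulates the distributor identification/control registers:
 * GICD_CTLR (global enable), GICD_TYPER (number of CPUs and IRQs) and
 * GICD_IIDR (implementer/product ID). Only GICD_CTLR is writable.
 */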
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;
        u32 word_offset = offset & 3;

        switch (offset & ~3) {
        case 0:                 /* GICD_CTLR */
                reg = vcpu->kvm->arch.vgic.enabled;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
                if (mmio->is_write) {
                        vcpu->kvm->arch.vgic.enabled = reg & 1;
                        vgic_update_state(vcpu->kvm);
                        return true;
                }
                break;

        case 4:                 /* GICD_TYPER */
                reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                reg |= (VGIC_NR_IRQS >> 5) - 1;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                break;

        case 8:                 /* GICD_IIDR */
                reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                break;
        }

        return false;
}

static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
        if (mmio->is_write) {
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
                                         struct kvm_exit_mmio *mmio,
                                         phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
        if (mmio->is_write) {
                if (offset < 4) /* Force SGI enabled */
                        *reg |= 0xffff;
                vgic_retire_disabled_irqs(vcpu);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
                                        struct kvm_exit_mmio *mmio,
                                        phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
        if (mmio->is_write) {
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
                                          struct kvm_exit_mmio *mmio,
                                          phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
        if (mmio->is_write) {
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                        vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

#define GICD_ITARGETSR_SIZE     32
#define GICD_CPUTARGETS_BITS    8
#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)

static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int i;
        u32 val = 0;

        irq -= VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
                val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

        return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int i, c;
        unsigned long *bmap;
        u32 target;

        irq -= VGIC_NR_PRIVATE_IRQS;

        /*
         * Pick the LSB in each byte. This ensures we target exactly
         * one vcpu per IRQ. If the byte is null, assume we target
         * CPU0.
         */
        for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
                int shift = i * GICD_CPUTARGETS_BITS;
                target = ffs((val >> shift) & 0xffU);
                target = target ? (target - 1) : 0;
                dist->irq_spi_cpu[irq + i] = target;
                kvm_for_each_vcpu(c, vcpu, kvm) {
                        bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
                        if (c == target)
                                set_bit(irq + i, bmap);
                        else
                                clear_bit(irq + i, bmap);
                }
        }
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
                                   struct kvm_exit_mmio *mmio,
                                   phys_addr_t offset)
{
        u32 reg;

        /* We treat the banked interrupt targets as read-only */
        if (offset < 32) {
                u32 roreg = 1 << vcpu->vcpu_id;
                roreg |= roreg << 8;
                roreg |= roreg << 16;

                vgic_reg_access(mmio, &roreg, offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static u32 vgic_cfg_expand(u16 val)
{
        u32 res = 0;
        int i;

        /*
         * Turn a 16bit value like abcd...mnop into a 32bit word
         * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
         */
        for (i = 0; i < 16; i++)
                res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

        return res;
}

static u16 vgic_cfg_compress(u32 val)
{
        u16 res = 0;
        int i;

        /*
         * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
         * abcd...mnop which is what we really care about.
         */
        for (i = 0; i < 16; i++)
                res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

        return res;
}

/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 val;
        u32 *reg;

        reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                  vcpu->vcpu_id, offset >> 1);

        if (offset & 4)
                val = *reg >> 16;
        else
                val = *reg & 0xffff;

        val = vgic_cfg_expand(val);
        vgic_reg_access(mmio, &val, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                if (offset < 8) {
                        *reg = ~0U; /* Force PPIs/SGIs to 1 */
                        return false;
                }

                val = vgic_cfg_compress(val);
                if (offset & 4) {
                        *reg &= 0xffff;
                        *reg |= val << 16;
                } else {
                        *reg &= 0xffff << 16;
                        *reg |= val;
                }
        }

        return false;
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vgic_dispatch_sgi(vcpu, reg);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

#define LR_CPUID(lr)    \
        (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
#define LR_IRQID(lr)    \
        ((lr) & GICH_LR_VIRTUALID)

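/*
 * Mark a list register as free again: drop its pending/active state
 * bits and forget the IRQ->LR mapping.
 */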
static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
{
        clear_bit(lr_nr, vgic_cpu->lr_used);
        vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
        vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}

/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vcpu: pointer to the vcpu whose LRs are examined
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        int vcpu_id = vcpu->vcpu_id;
        int i, irq, source_cpu;
        u32 *lr;

        for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
                lr = &vgic_cpu->vgic_lr[i];
                irq = LR_IRQID(*lr);
                source_cpu = LR_CPUID(*lr);

                /*
                 * There are three options for the state bits:
                 *
                 * 01: pending
                 * 10: active
                 * 11: pending and active
                 *
                 * If the LR holds only an active interrupt (not pending) then
                 * just leave it alone.
                 */
                if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT)
                        continue;

                /*
                 * Reestablish the pending state on the distributor and the
                 * CPU interface.  It may have already been pending, but that
                 * is fine, then we are only setting a few bits that were
                 * already set.
                 */
                vgic_dist_irq_set(vcpu, irq);
                if (irq < VGIC_NR_SGIS)
                        dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu;
                *lr &= ~GICH_LR_PENDING_BIT;

                /*
                 * If there's no state left on the LR (it could still be
                 * active), then the LR does not hold any useful info and can
                 * be marked as free for other use.
                 */
                if (!(*lr & GICH_LR_STATE))
                        vgic_retire_lr(i, irq, vgic_cpu);

                /* Finally update the VGIC state. */
                vgic_update_state(vcpu->kvm);
        }
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
                                        struct kvm_exit_mmio *mmio,
                                        phys_addr_t offset)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int sgi;
        /*
         * Each 32-bit register covers four SGIs, one byte per SGI, so
         * the word-aligned byte offset is also the first SGI index.
         */
        int min_sgi = (offset & ~0x3);
        int max_sgi = min_sgi + 3;
        int vcpu_id = vcpu->vcpu_id;
        u32 reg = 0;

        /* Copy source SGIs from distributor side */
        for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
                int shift = 8 * (sgi - min_sgi);
                reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
        }

        mmio_data_write(mmio, ~0, reg);
        return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
                                         struct kvm_exit_mmio *mmio,
                                         phys_addr_t offset, bool set)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int sgi;
        int min_sgi = (offset & ~0x3);
        int max_sgi = min_sgi + 3;
        int vcpu_id = vcpu->vcpu_id;
        u32 reg;
        bool updated = false;

        reg = mmio_data_read(mmio, ~0);

        /* Set or clear pending SGIs on the distributor side */
        for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
                u8 mask = reg >> (8 * (sgi - min_sgi));
                if (set) {
                        if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
                                updated = true;
                        dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
                } else {
                        if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
                                updated = true;
                        dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
                }
        }

        if (updated)
                vgic_update_state(vcpu->kvm);

        return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio,
                                phys_addr_t offset)
{
        if (!mmio->is_write)
                return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
        else
                return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        if (!mmio->is_write)
                return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
        else
                return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}

/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
        phys_addr_t base;
        unsigned long len;
        bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
                            phys_addr_t offset);
};

static const struct mmio_range vgic_dist_ranges[] = {
        {
                .base = GIC_DIST_CTRL,
                .len = 12,
                .handle_mmio = handle_mmio_misc,
        },
        {
                .base = GIC_DIST_IGROUP,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_DIST_ENABLE_SET,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_set_enable_reg,
        },
        {
                .base = GIC_DIST_ENABLE_CLEAR,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_clear_enable_reg,
        },
        {
                .base = GIC_DIST_PENDING_SET,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_set_pending_reg,
        },
        {
                .base = GIC_DIST_PENDING_CLEAR,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_clear_pending_reg,
        },
        {
                .base = GIC_DIST_ACTIVE_SET,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_DIST_ACTIVE_CLEAR,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_DIST_PRI,
                .len = VGIC_NR_IRQS,
                .handle_mmio = handle_mmio_priority_reg,
        },
        {
                .base = GIC_DIST_TARGET,
                .len = VGIC_NR_IRQS,
                .handle_mmio = handle_mmio_target_reg,
        },
        {
                .base = GIC_DIST_CONFIG,
                .len = VGIC_NR_IRQS / 4,
                .handle_mmio = handle_mmio_cfg_reg,
        },
        {
                .base = GIC_DIST_SOFTINT,
                .len = 4,
                .handle_mmio = handle_mmio_sgi_reg,
        },
        {
                .base = GIC_DIST_SGI_PENDING_CLEAR,
                .len = VGIC_NR_SGIS,
                .handle_mmio = handle_mmio_sgi_clear,
        },
        {
                .base = GIC_DIST_SGI_PENDING_SET,
                .len = VGIC_NR_SGIS,
                .handle_mmio = handle_mmio_sgi_set,
        },
        {}
};

static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        const struct mmio_range *r = ranges;

        while (r->len) {
                if (offset >= r->base &&
                    (offset + mmio->len) <= (r->base + r->len))
                        return r;
                r++;
        }

        return NULL;
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu: pointer to the vcpu performing the access
 * @run:  pointer to the kvm_run structure
 * @mmio: pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
                      struct kvm_exit_mmio *mmio)
{
        const struct mmio_range *range;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long base = dist->vgic_dist_base;
        bool updated_state;
        unsigned long offset;

        if (!irqchip_in_kernel(vcpu->kvm) ||
            mmio->phys_addr < base ||
            (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
                return false;

        /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
        if (mmio->len > 4) {
                kvm_inject_dabt(vcpu, mmio->phys_addr);
                return true;
        }

        offset = mmio->phys_addr - base;
        range = find_matching_range(vgic_dist_ranges, mmio, offset);
        if (unlikely(!range || !range->handle_mmio)) {
                pr_warn("Unhandled access %d %08llx %d\n",
                        mmio->is_write, mmio->phys_addr, mmio->len);
                return false;
        }

        spin_lock(&vcpu->kvm->arch.vgic.lock);
        offset = mmio->phys_addr - range->base - base;
        updated_state = range->handle_mmio(vcpu, mmio, offset);
        spin_unlock(&vcpu->kvm->arch.vgic.lock);
        kvm_prepare_mmio(run, mmio);
        kvm_handle_mmio_return(vcpu, run);

        if (updated_state)
                vgic_kick_vcpus(vcpu->kvm);

        return true;
}

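/*
 * Handle a write to GICD_SGIR: decode the SGI number (bits [3:0]),
 * the target list (bits [23:16]) and the filter mode (bits [25:24]:
 * 0 = use target list, 1 = all but self, 2 = self only), then mark
 * the SGI pending on each targeted vcpu along with its source CPU.
 */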
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int nrcpus = atomic_read(&kvm->online_vcpus);
        u8 target_cpus;
        int sgi, mode, c, vcpu_id;

        vcpu_id = vcpu->vcpu_id;

        sgi = reg & 0xf;
        target_cpus = (reg >> 16) & 0xff;
        mode = (reg >> 24) & 3;

        switch (mode) {
        case 0:
                if (!target_cpus)
                        return;
                break;

        case 1:
                target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
                break;

        case 2:
                target_cpus = 1 << vcpu_id;
                break;
        }

        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (target_cpus & 1) {
                        /* Flag the SGI as pending */
                        vgic_dist_irq_set(vcpu, sgi);
                        dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
                        kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
                }

                target_cpus >>= 1;
        }
}

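/*
 * Compute the oracle described at the top of this file for a single
 * vcpu: an interrupt is pending for this cpu if it is pending and
 * enabled (and, for SPIs, targeted at this vcpu). The result is
 * cached in the vcpu's pending_percpu/pending_shared bitmaps, which
 * the flush path consumes.
 */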
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
        unsigned long pending_private, pending_shared;
        int vcpu_id;

        vcpu_id = vcpu->vcpu_id;
        pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
        pend_shared = vcpu->arch.vgic_cpu.pending_shared;

        pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
        enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
        bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

        pending = vgic_bitmap_get_shared_map(&dist->irq_state);
        enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
        bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
        bitmap_and(pend_shared, pend_shared,
                   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
                   VGIC_NR_SHARED_IRQS);

        pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
        pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);

        return (pending_private < VGIC_NR_PRIVATE_IRQS ||
                pending_shared < VGIC_NR_SHARED_IRQS);
}

/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int c;

        if (!dist->enabled) {
                set_bit(0, &dist->irq_pending_on_cpu);
                return;
        }

        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (compute_pending_for_cpu(vcpu)) {
                        pr_debug("CPU%d has pending interrupts\n", c);
                        set_bit(c, &dist->irq_pending_on_cpu);
                }
        }
}

#define MK_LR_PEND(src, irq)    \
        (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        int lr;

        for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
                int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

                if (!vgic_irq_is_enabled(vcpu, irq)) {
                        vgic_retire_lr(lr, irq, vgic_cpu);
                        if (vgic_irq_is_active(vcpu, irq))
                                vgic_irq_clear_active(vcpu, irq);
                }
        }
}

/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        int lr;

        /* Sanitize the input... */
        BUG_ON(sgi_source_id & ~7);
        BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
        BUG_ON(irq >= VGIC_NR_IRQS);

        kvm_debug("Queue IRQ%d\n", irq);

        lr = vgic_cpu->vgic_irq_lr_map[irq];

        /* Do we have an active interrupt for the same CPUID? */
        if (lr != LR_EMPTY &&
            (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
                kvm_debug("LR%d piggyback for IRQ%d %x\n",
                          lr, irq, vgic_cpu->vgic_lr[lr]);
                BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
                vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
                return true;
        }

        /* Try to use another LR for this interrupt */
        lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
                                 vgic_cpu->nr_lr);
        if (lr >= vgic_cpu->nr_lr)
                return false;

        kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
        vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
        vgic_cpu->vgic_irq_lr_map[irq] = lr;
        set_bit(lr, vgic_cpu->lr_used);

        if (!vgic_irq_is_edge(vcpu, irq))
                vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;

        return true;
}

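/*
 * Queue an SGI. An SGI may be pending from several source CPUs at
 * once; try to queue one LR per source, and only consume the pending
 * state once every source has been queued.
 */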
static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long sources;
        int vcpu_id = vcpu->vcpu_id;
        int c;

        sources = dist->irq_sgi_sources[vcpu_id][irq];

        for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
                if (vgic_queue_irq(vcpu, c, irq))
                        clear_bit(c, &sources);
        }

        dist->irq_sgi_sources[vcpu_id][irq] = sources;

        /*
         * If the sources bitmap has been cleared it means that we
         * could queue all the SGIs onto link registers (see the
         * clear_bit above), and therefore we are done with them in
         * our emulated gic and can get rid of them.
         */
        if (!sources) {
                vgic_dist_irq_clear(vcpu, irq);
                vgic_cpu_irq_clear(vcpu, irq);
                return true;
        }

        return false;
}

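/*
 * Queue a PPI or SPI. Edge interrupts consume their pending state on
 * queueing; level interrupts are instead marked active until the
 * guest EOIs them.
 */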
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
        if (vgic_irq_is_active(vcpu, irq))
                return true; /* level interrupt, already queued */

        if (vgic_queue_irq(vcpu, 0, irq)) {
                if (vgic_irq_is_edge(vcpu, irq)) {
                        vgic_dist_irq_clear(vcpu, irq);
                        vgic_cpu_irq_clear(vcpu, irq);
                } else {
                        vgic_irq_set_active(vcpu, irq);
                }

                return true;
        }

        return false;
}

/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int i, vcpu_id;
        int overflow = 0;

        vcpu_id = vcpu->vcpu_id;

        /*
         * We may not have any pending interrupt, or the interrupts
         * may have been serviced from another vcpu. In all cases,
         * move along.
         */
        if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
                pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
                goto epilog;
        }

        /* SGIs */
        for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
                if (!vgic_queue_sgi(vcpu, i))
                        overflow = 1;
        }

        /* PPIs */
        for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
                if (!vgic_queue_hwirq(vcpu, i))
                        overflow = 1;
        }

        /* SPIs */
        for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
                if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
                        overflow = 1;
        }

epilog:
        if (overflow) {
                vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
        } else {
                vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
                /*
                 * We're about to run this VCPU, and we've consumed
                 * everything the distributor had in store for
                 * us. Claim we don't have anything pending. We'll
                 * adjust that if needed while exiting.
                 */
                clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
        }
}

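/*
 * Process the GICH maintenance interrupt status after a guest exit:
 * for each EOIed level interrupt (flagged in EISR), clear its active
 * state and resample the distributor pending state; on underflow,
 * turn the underflow interrupt off again.
 */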
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        bool level_pending = false;

        kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);

        if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
                /*
                 * Some level interrupts have been EOIed. Clear their
                 * active bit.
                 */
                int lr, irq;

                for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
                                 vgic_cpu->nr_lr) {
                        irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

                        vgic_irq_clear_active(vcpu, irq);
                        vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;

                        /* Any additional pending interrupt? */
                        if (vgic_dist_irq_is_pending(vcpu, irq)) {
                                vgic_cpu_irq_set(vcpu, irq);
                                level_pending = true;
                        } else {
                                vgic_cpu_irq_clear(vcpu, irq);
                        }

                        /*
                         * Despite being EOIed, the LR may not have
                         * been marked as empty.
                         */
                        set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
                        vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
                }
        }

        if (vgic_cpu->vgic_misr & GICH_MISR_U)
                vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;

        return level_pending;
}

/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int lr, pending;
        bool level_pending;

        level_pending = vgic_process_maintenance(vcpu);

        /* Clear mappings for empty LRs */
        for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
                         vgic_cpu->nr_lr) {
                int irq;

                if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
                        continue;

                irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

                BUG_ON(irq >= VGIC_NR_IRQS);
                vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
        }

        /* Check if we still have something up our sleeve... */
        pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
                                      vgic_cpu->nr_lr);
        if (level_pending || pending < vgic_cpu->nr_lr)
                set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        if (!irqchip_in_kernel(vcpu->kvm))
                return;

        spin_lock(&dist->lock);
        __kvm_vgic_flush_hwstate(vcpu);
        spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        if (!irqchip_in_kernel(vcpu->kvm))
                return;

        spin_lock(&dist->lock);
        __kvm_vgic_sync_hwstate(vcpu);
        spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        if (!irqchip_in_kernel(vcpu->kvm))
                return 0;

        return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

static void vgic_kick_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int c;

        /*
         * We've injected an interrupt, time to find out who deserves
         * a good kick...
         */
        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (kvm_vgic_vcpu_pending_irq(vcpu))
                        kvm_vcpu_kick(vcpu);
        }
}

static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
        int is_edge = vgic_irq_is_edge(vcpu, irq);
        int state = vgic_dist_irq_is_pending(vcpu, irq);

        /*
         * Only inject an interrupt if:
         * - edge triggered and we have a rising edge
         * - level triggered and we change level
         */
        if (is_edge)
                return level > state;
        else
                return level != state;
}

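/*
 * Update the distributor's view of an injected interrupt. Returns
 * true when the change needs to be propagated to a vcpu (the caller
 * should kick it), false when the injection was swallowed (invalid
 * transition, interrupt disabled, or level interrupt still active).
 */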
static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
                                  unsigned int irq_num, bool level)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int is_edge, is_level;
        int enabled;
        bool ret = true;

        spin_lock(&dist->lock);

        vcpu = kvm_get_vcpu(kvm, cpuid);
        is_edge = vgic_irq_is_edge(vcpu, irq_num);
        is_level = !is_edge;

        if (!vgic_validate_injection(vcpu, irq_num, level)) {
                ret = false;
                goto out;
        }

        if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
                cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
                vcpu = kvm_get_vcpu(kvm, cpuid);
        }

        kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

        if (level)
                vgic_dist_irq_set(vcpu, irq_num);
        else
                vgic_dist_irq_clear(vcpu, irq_num);

        enabled = vgic_irq_is_enabled(vcpu, irq_num);

        if (!enabled) {
                ret = false;
                goto out;
        }

        if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
                /*
                 * Level interrupt in progress, will be picked up
                 * when EOId.
                 */
                ret = false;
                goto out;
        }

        if (level) {
                vgic_cpu_irq_set(vcpu, irq_num);
                set_bit(cpuid, &dist->irq_pending_on_cpu);
        }

out:
        spin_unlock(&dist->lock);

        return ret;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *                            false: to ignore the call
 *           Level-sensitive: true:  activates an interrupt
 *                            false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
                        bool level)
{
        if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
                vgic_kick_vcpus(kvm);

        return 0;
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
        /*
         * We cannot rely on the vgic maintenance interrupt to be
         * delivered synchronously. This means we can only use it to
         * exit the VM, and we perform the handling of EOIed
         * interrupts on the exit path (see vgic_process_maintenance).
         */
        return IRQ_HANDLED;
}

/**
 * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
 * this vcpu and enable the VGIC for this VCPU
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int i;

        if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
                return -EBUSY;

        for (i = 0; i < VGIC_NR_IRQS; i++) {
                if (i < VGIC_NR_PPIS)
                        vgic_bitmap_set_irq_val(&dist->irq_enabled,
                                                vcpu->vcpu_id, i, 1);
                if (i < VGIC_NR_PRIVATE_IRQS)
                        vgic_bitmap_set_irq_val(&dist->irq_cfg,
                                                vcpu->vcpu_id, i, VGIC_CFG_EDGE);

                vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
        }

        /*
         * By forcing VMCR to zero, the GIC will restore the binary
         * points to their reset values. Anything else resets to zero
         * anyway.
         */
        vgic_cpu->vgic_vmcr = 0;

        vgic_cpu->nr_lr = vgic_nr_lr;
        vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */

        return 0;
}

static void vgic_init_maintenance_interrupt(void *info)
{
        enable_percpu_irq(vgic_maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
                           unsigned long action, void *cpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                vgic_init_maintenance_interrupt(NULL);
                break;
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                disable_percpu_irq(vgic_maint_irq);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
        .notifier_call = vgic_cpu_notify,
};

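/*
 * Probe the device tree for a GIC with a virtual interface, map the
 * virtual control registers (GICH) into the kernel and into hyp, read
 * the number of list registers from GICH_VTR, record the GICV physical
 * base and wire up the per-cpu maintenance interrupt.
 */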
int kvm_vgic_hyp_init(void)
{
        int ret;
        struct resource vctrl_res;
        struct resource vcpu_res;

        vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
        if (!vgic_node) {
                kvm_err("error: no compatible vgic node in DT\n");
                return -ENODEV;
        }

        vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
        if (!vgic_maint_irq) {
                kvm_err("error getting vgic maintenance irq from DT\n");
                ret = -ENXIO;
                goto out;
        }

        ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
                                 "vgic", kvm_get_running_vcpus());
        if (ret) {
                kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
                goto out;
        }

        ret = __register_cpu_notifier(&vgic_cpu_nb);
        if (ret) {
                kvm_err("Cannot register vgic CPU notifier\n");
                goto out_free_irq;
        }

        ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
        if (ret) {
                kvm_err("Cannot obtain VCTRL resource\n");
                goto out_free_irq;
        }

        vgic_vctrl_base = of_iomap(vgic_node, 2);
        if (!vgic_vctrl_base) {
                kvm_err("Cannot ioremap VCTRL\n");
                ret = -ENOMEM;
                goto out_free_irq;
        }

        vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
        vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;

        ret = create_hyp_io_mappings(vgic_vctrl_base,
                                     vgic_vctrl_base + resource_size(&vctrl_res),
                                     vctrl_res.start);
        if (ret) {
                kvm_err("Cannot map VCTRL into hyp\n");
                goto out_unmap;
        }

        if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
                kvm_err("Cannot obtain VCPU resource\n");
                ret = -ENXIO;
                goto out_unmap;
        }

        if (!PAGE_ALIGNED(vcpu_res.start)) {
                kvm_err("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)vcpu_res.start);
                ret = -ENXIO;
                goto out_unmap;
        }

        if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
                kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
                        (unsigned long long)resource_size(&vcpu_res),
                        PAGE_SIZE);
                ret = -ENXIO;
                goto out_unmap;
        }

        vgic_vcpu_base = vcpu_res.start;

        kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
                 vctrl_res.start, vgic_maint_irq);
        on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

        goto out;

out_unmap:
        iounmap(vgic_vctrl_base);
out_free_irq:
        free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
out:
        of_node_put(vgic_node);
        return ret;
}

/**
 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs. We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space. Also
 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
 */
int kvm_vgic_init(struct kvm *kvm)
{
        int ret = 0, i;

        if (!irqchip_in_kernel(kvm))
                return 0;

        mutex_lock(&kvm->lock);

        if (vgic_initialized(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
                kvm_err("Need to set vgic cpu and dist addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
                                    vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
        if (ret) {
                kvm_err("Unable to remap VGIC CPU to VCPU\n");
                goto out;
        }

        for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
                vgic_set_target_reg(kvm, 0, i);

        kvm->arch.vgic.ready = true;
out:
        mutex_unlock(&kvm->lock);
        return ret;
}


int kvm_vgic_create(struct kvm *kvm)
{
        int i, vcpu_lock_idx = -1, ret = 0;
        struct kvm_vcpu *vcpu;

        mutex_lock(&kvm->lock);

        if (kvm->arch.vgic.vctrl_base) {
                ret = -EEXIST;
                goto out;
        }

        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
         * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run while we create the vgic.
         */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!mutex_trylock(&vcpu->mutex)) {
                        /* A VCPU is running concurrently; report failure
                         * instead of falling through with ret == 0. */
                        ret = -EBUSY;
                        goto out_unlock;
                }
                vcpu_lock_idx = i;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu->arch.has_run_once) {
                        ret = -EBUSY;
                        goto out_unlock;
                }
        }

        spin_lock_init(&kvm->arch.vgic.lock);
        kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
        kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
        kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out_unlock:
        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
                vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&vcpu->mutex);
        }

out:
        mutex_unlock(&kvm->lock);
        return ret;
}
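
/*
 * Illustrative only, not part of the original file: user space reaches
 * kvm_vgic_create() through the device framework hooked up in
 * kvm_arm_vgic_v2_ops at the bottom of this file, roughly:
 *
 *         struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V2 };
 *         ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 * after which cd.fd is the vgic device fd.  A second creation attempt
 * fails with -EEXIST, and creating the vgic after a VCPU has already run
 * fails with -EBUSY, per the checks above.
 */

/*
 * Check whether the distributor and CPU interface windows overlap.
 * Returns -EBUSY on overlap and 0 otherwise; a window whose base is still
 * VGIC_ADDR_UNDEF cannot overlap anything.
 */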
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
        phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
        phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

        if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
                return 0;
        if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
            (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
                return -EBUSY;
        return 0;
}
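
/*
 * Validate and record one of the two guest-physical base addresses: it
 * must fit in the guest physical address space (KVM_PHYS_MASK), be 4K
 * aligned, not have been set before, not wrap around zero, and not
 * overlap the other window.  On an overlap the address is reset to
 * VGIC_ADDR_UNDEF and -EBUSY is returned.
 */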
static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
                              phys_addr_t addr, phys_addr_t size)
{
        int ret;

        if (addr & ~KVM_PHYS_MASK)
                return -E2BIG;
        if (addr & (SZ_4K - 1))
                return -EINVAL;
        if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
                return -EEXIST;
        if (addr + size < addr)
                return -EINVAL;

        *ioaddr = addr;
        ret = vgic_ioaddr_overlap(kvm);
        if (ret)
                *ioaddr = VGIC_ADDR_UNDEF;

        return ret;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
        int r = 0;
        struct vgic_dist *vgic = &kvm->arch.vgic;

        mutex_lock(&kvm->lock);
        switch (type) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                if (write) {
                        r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
                                               *addr, KVM_VGIC_V2_DIST_SIZE);
                } else {
                        *addr = vgic->vgic_dist_base;
                }
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                if (write) {
                        r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
                                               *addr, KVM_VGIC_V2_CPU_SIZE);
                } else {
                        *addr = vgic->vgic_cpu_base;
                }
                break;
        default:
                r = -ENODEV;
        }

        mutex_unlock(&kvm->lock);
        return r;
}
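
/*
 * Handle accesses to the pieces of emulated GIC CPU interface state that
 * live in GICH_VMCR: the control bits, the priority mask and the
 * (aliased) binary point.  The register offset selects a mask/shift pair;
 * reads extract the field from vgic_vmcr, writes insert it.  For example,
 * a 32-bit write of 0xf8 to GIC_CPU_PRIMASK stores
 * (0xf8 << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK into
 * vgic_vmcr.  Returns true only when a write actually changed the field.
 */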
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
                                 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        u32 reg, mask = 0, shift = 0;
        bool updated = false;

        switch (offset & ~0x3) {
        case GIC_CPU_CTRL:
                mask = GICH_VMCR_CTRL_MASK;
                shift = GICH_VMCR_CTRL_SHIFT;
                break;
        case GIC_CPU_PRIMASK:
                mask = GICH_VMCR_PRIMASK_MASK;
                shift = GICH_VMCR_PRIMASK_SHIFT;
                break;
        case GIC_CPU_BINPOINT:
                mask = GICH_VMCR_BINPOINT_MASK;
                shift = GICH_VMCR_BINPOINT_SHIFT;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                mask = GICH_VMCR_ALIAS_BINPOINT_MASK;
                shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT;
                break;
        }

        if (!mmio->is_write) {
                reg = (vgic_cpu->vgic_vmcr & mask) >> shift;
                mmio_data_write(mmio, ~0, reg);
        } else {
                reg = mmio_data_read(mmio, ~0);
                reg = (reg << shift) & mask;
                if (reg != (vgic_cpu->vgic_vmcr & mask))
                        updated = true;
                vgic_cpu->vgic_vmcr &= ~mask;
                vgic_cpu->vgic_vmcr |= reg;
        }
        return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        u32 reg;

        if (mmio->is_write)
                return false;

        /* GICC_IIDR */
        reg = (PRODUCT_ID_KVM << 20) |
              (GICC_ARCH_VERSION_V2 << 16) |
              (IMPLEMENTER_ARM << 0);
        mmio_data_write(mmio, ~0, reg);
        return false;
}

/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
        {
                .base = GIC_CPU_CTRL,
                .len = 12,
                .handle_mmio = handle_cpu_mmio_misc,
        },
        {
                .base = GIC_CPU_ALIAS_BINPOINT,
                .len = 4,
                .handle_mmio = handle_mmio_abpr,
        },
        {
                .base = GIC_CPU_ACTIVEPRIO,
                .len = 16,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_CPU_IDENT,
                .len = 4,
                .handle_mmio = handle_cpu_mmio_ident,
        },
};
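
/*
 * Perform a single 32-bit register access on behalf of user space.
 * attr->attr encodes both the register offset within the window
 * (KVM_DEV_ARM_VGIC_OFFSET_MASK) and, for banked registers, which VCPU's
 * copy to access (KVM_DEV_ARM_VGIC_CPUID_MASK/_SHIFT).  The access reuses
 * the same mmio_range handlers the guest traps into, but only after
 * checking that no VCPU is running and after flushing pending state out
 * of the list registers.
 */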
static int vgic_attr_regs_access(struct kvm_device *dev,
                                 struct kvm_device_attr *attr,
                                 u32 *reg, bool is_write)
{
        const struct mmio_range *r = NULL, *ranges;
        phys_addr_t offset;
        int ret, cpuid, c;
        struct kvm_vcpu *vcpu, *tmp_vcpu;
        struct vgic_dist *vgic;
        struct kvm_exit_mmio mmio;

        offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        mutex_lock(&dev->kvm->lock);

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
                ret = -EINVAL;
                goto out;
        }

        vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        vgic = &dev->kvm->arch.vgic;

        mmio.len = 4;
        mmio.is_write = is_write;
        if (is_write)
                mmio_data_write(&mmio, ~0, *reg);
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                mmio.phys_addr = vgic->vgic_dist_base + offset;
                ranges = vgic_dist_ranges;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                mmio.phys_addr = vgic->vgic_cpu_base + offset;
                ranges = vgic_cpu_ranges;
                break;
        default:
                BUG();
        }
        r = find_matching_range(ranges, &mmio, offset);

        if (unlikely(!r || !r->handle_mmio)) {
                ret = -ENXIO;
                goto out;
        }

        spin_lock(&vgic->lock);

        /*
         * Ensure that no other VCPU is running by checking the vcpu->cpu
         * field.  If no other VCPUs are running we can safely access the VGIC
         * state, because even if another VCPU is run after this point, that
         * VCPU will not touch the vgic state, because it will block on
         * getting the vgic->lock in kvm_vgic_sync_hwstate().
         */
        kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
                if (unlikely(tmp_vcpu->cpu != -1)) {
                        ret = -EBUSY;
                        goto out_vgic_unlock;
                }
        }

        /*
         * Move all pending IRQs from the LRs on all VCPUs so the pending
         * state can be properly represented in the register state accessible
         * through this API.
         */
        kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
                vgic_unqueue_irqs(tmp_vcpu);

        offset -= r->base;
        r->handle_mmio(vcpu, &mmio, offset);

        if (!is_write)
                *reg = mmio_data_read(&mmio, ~0);

        ret = 0;
out_vgic_unlock:
        spin_unlock(&vgic->lock);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}

static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                r = kvm_vgic_addr(dev->kvm, type, &addr, true);
                return (r == -ENODEV) ? -ENXIO : r;
        }

        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_attr_regs_access(dev, attr, &reg, true);
        }

        }

        return -ENXIO;
}

static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int r = -ENXIO;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                r = kvm_vgic_addr(dev->kvm, type, &addr, false);
                if (r)
                        return (r == -ENODEV) ? -ENXIO : r;

                if (copy_to_user(uaddr, &addr, sizeof(addr)))
                        return -EFAULT;
                break;
        }

        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg = 0;

                r = vgic_attr_regs_access(dev, attr, &reg, false);
                if (r)
                        return r;
                r = put_user(reg, uaddr);
                break;
        }

        }

        return r;
}
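
/*
 * Illustrative only, not part of the original file: reading one 32-bit
 * distributor register from user space through the handlers above
 * ("vgic_fd", "cpuid" and "offset" are hypothetical names):
 *
 *         u32 reg32;
 *         struct kvm_device_attr attr = {
 *                 .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
 *                 .attr  = ((u64)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
 *                          (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
 *                 .addr  = (u64)(unsigned long)&reg32,
 *         };
 *         ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
 */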

static int vgic_has_attr_regs(const struct mmio_range *ranges,
                              phys_addr_t offset)
{
        struct kvm_exit_mmio dev_attr_mmio;

        dev_attr_mmio.len = 4;
        if (find_matching_range(ranges, &dev_attr_mmio, offset))
                return 0;
        else
                return -ENXIO;
}

static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        phys_addr_t offset;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
                return vgic_has_attr_regs(vgic_dist_ranges, offset);
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
                return vgic_has_attr_regs(vgic_cpu_ranges, offset);
        }
        return -ENXIO;
}

static void vgic_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm);
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_set_attr,
        .get_attr = vgic_get_attr,
        .has_attr = vgic_has_attr,
};