vgic.c

  1. /*
  2. * Copyright (C) 2012 ARM Ltd.
  3. * Author: Marc Zyngier <marc.zyngier@arm.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  17. */
  18. #include <linux/cpu.h>
  19. #include <linux/kvm.h>
  20. #include <linux/kvm_host.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/io.h>
  23. #include <linux/of.h>
  24. #include <linux/of_address.h>
  25. #include <linux/of_irq.h>
  26. #include <linux/rculist.h>
  27. #include <linux/uaccess.h>
  28. #include <asm/kvm_emulate.h>
  29. #include <asm/kvm_arm.h>
  30. #include <asm/kvm_mmu.h>
  31. #include <trace/events/kvm.h>
  32. #include <asm/kvm.h>
  33. #include <kvm/iodev.h>
  34. /*
  35. * How the whole thing works (courtesy of Christoffer Dall):
  36. *
  37. * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
  38. * something is pending on the CPU interface.
  39. * - Interrupts that are pending on the distributor are stored on the
  40. * vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
  41. * ioctls and guest mmio ops, and other in-kernel peripherals such as the
  42. * arch. timers).
  43. * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
  44. * recalculated
  45. * - To calculate the oracle, we need info for each cpu from
  46. * compute_pending_for_cpu, which considers:
  47. * - PPI: dist->irq_pending & dist->irq_enable
  48. * - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
  49. * - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
  50. * registers, stored on each vcpu. We only keep one bit of
  51. * information per interrupt, making sure that only one vcpu can
  52. * accept the interrupt.
  53. * - If any of the above state changes, we must recalculate the oracle.
  54. * - The same is true when injecting an interrupt, except that we only
  55. * consider a single interrupt at a time. The irq_spi_cpu array
  56. * contains the target CPU for each SPI.
  57. *
  58. * The handling of level interrupts adds some extra complexity. We
  59. * need to track when the interrupt has been EOIed, so we can sample
  60. * the 'line' again. This is achieved as such:
  61. *
  62. * - When a level interrupt is moved onto a vcpu, the corresponding
  63. * bit in irq_queued is set. As long as this bit is set, the line
  64. * will be ignored for further interrupts. The interrupt is injected
  65. * into the vcpu with the GICH_LR_EOI bit set (generate a
  66. * maintenance interrupt on EOI).
  67. * - When the interrupt is EOIed, the maintenance interrupt fires,
  68. * and clears the corresponding bit in irq_queued. This allows the
  69. * interrupt line to be sampled again.
  70. * - Note that level-triggered interrupts can also be set to pending from
  71. * writes to GICD_ISPENDRn and lowering the external input line does not
  72. * cause the interrupt to become inactive in such a situation.
  73. * Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
  74. * inactive as long as the external input line is held high.
  75. *
  76. *
  77. * Initialization rules: there are multiple stages to the vgic
  78. * initialization, both for the distributor and the CPU interfaces.
  79. *
  80. * Distributor:
  81. *
  82. * - kvm_vgic_early_init(): initialization of static data that doesn't
  83. * depend on any sizing information or emulation type. No allocation
  84. * is allowed there.
  85. *
  86. * - vgic_init(): allocation and initialization of the generic data
  87. * structures that depend on sizing information (number of CPUs,
  88. * number of interrupts). Also initializes the vcpu specific data
  89. * structures. Can be executed lazily for GICv2.
  90. * [to be renamed to kvm_vgic_init??]
  91. *
  92. * CPU Interface:
  93. *
  94. * - kvm_vgic_cpu_early_init(): initialization of static data that
  95. * doesn't depend on any sizing information or emulation type. No
  96. * allocation is allowed there.
  97. */
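/*
 * Illustration of the oracle described above (an assumed example, not
 * part of the original flow): if SPI 40 is pending, enabled, and
 * targeted at vcpu 1 (its bit set in irq_spi_target[1]), then
 * compute_pending_for_cpu() returns true for vcpu 1, vgic_update_state()
 * sets bit 1 of dist->irq_pending_on_cpu, and that vcpu gets kicked.
 */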
  98. #include "vgic.h"
  99. static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
  100. static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
  101. static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
  102. static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
  103. static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
  104. int virt_irq);
  105. static const struct vgic_ops *vgic_ops;
  106. static const struct vgic_params *vgic;
  107. static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
  108. {
  109. vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
  110. }
  111. static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
  112. {
  113. return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
  114. }
  115. int kvm_vgic_map_resources(struct kvm *kvm)
  116. {
  117. return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
  118. }
  119. /*
  120. * struct vgic_bitmap contains a bitmap made of unsigned longs, but
  121. * extracts u32s out of them.
  122. *
  123. * This does not work on 64-bit BE systems, because the bitmap access
  124. * will store two consecutive 32-bit words with the higher-addressed
  125. * register's bits at the lower index and the lower-addressed register's
  126. * bits at the higher index.
  127. *
  128. * Therefore, swizzle the register index when accessing the 32-bit word
  129. * registers to access the right register's value.
  130. */
  131. #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
  132. #define REG_OFFSET_SWIZZLE 1
  133. #else
  134. #define REG_OFFSET_SWIZZLE 0
  135. #endif
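/*
 * Illustration: on a 64-bit BE host, bits 0-31 of each unsigned long are
 * stored in the higher-addressed 32-bit word, so within every long the
 * two 32-bit registers appear swapped. XOR-ing the u32 index with
 * REG_OFFSET_SWIZZLE (1) in vgic_bitmap_get_reg() below swaps each
 * even/odd pair of register indices back, e.g. the private register for
 * IRQs 0-31 is read from u32 index 1 rather than index 0.
 */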
  136. static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
  137. {
  138. int nr_longs;
  139. nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
  140. b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
  141. if (!b->private)
  142. return -ENOMEM;
  143. b->shared = b->private + nr_cpus;
  144. return 0;
  145. }
  146. static void vgic_free_bitmap(struct vgic_bitmap *b)
  147. {
  148. kfree(b->private);
  149. b->private = NULL;
  150. b->shared = NULL;
  151. }
  152. /*
  153. * Call this function to convert a u64 value to an unsigned long * bitmask
  154. * in a way that works on both 32-bit and 64-bit LE and BE platforms.
  155. *
  156. * Warning: Calling this function may modify *val.
  157. */
  158. static unsigned long *u64_to_bitmask(u64 *val)
  159. {
  160. #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
  161. *val = (*val >> 32) | (*val << 32);
  162. #endif
  163. return (unsigned long *)val;
  164. }
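/*
 * Example: on a 32-bit BE host, a u64 with only bit 32 set is laid out
 * in memory as { 0x00000001, 0x00000000 } when viewed as unsigned longs;
 * swapping the two halves yields { 0x00000000, 0x00000001 }, so that
 * test_bit(32, ...) finds the bit in word 1 as expected.
 */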
  165. u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
  166. {
  167. offset >>= 2;
  168. if (!offset)
  169. return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
  170. else
  171. return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
  172. }
  173. static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
  174. int cpuid, int irq)
  175. {
  176. if (irq < VGIC_NR_PRIVATE_IRQS)
  177. return test_bit(irq, x->private + cpuid);
  178. return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
  179. }
  180. void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
  181. int irq, int val)
  182. {
  183. unsigned long *reg;
  184. if (irq < VGIC_NR_PRIVATE_IRQS) {
  185. reg = x->private + cpuid;
  186. } else {
  187. reg = x->shared;
  188. irq -= VGIC_NR_PRIVATE_IRQS;
  189. }
  190. if (val)
  191. set_bit(irq, reg);
  192. else
  193. clear_bit(irq, reg);
  194. }
  195. static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
  196. {
  197. return x->private + cpuid;
  198. }
  199. unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
  200. {
  201. return x->shared;
  202. }
  203. static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
  204. {
  205. int size;
  206. size = nr_cpus * VGIC_NR_PRIVATE_IRQS;
  207. size += nr_irqs - VGIC_NR_PRIVATE_IRQS;
  208. x->private = kzalloc(size, GFP_KERNEL);
  209. if (!x->private)
  210. return -ENOMEM;
  211. x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
  212. return 0;
  213. }
  214. static void vgic_free_bytemap(struct vgic_bytemap *b)
  215. {
  216. kfree(b->private);
  217. b->private = NULL;
  218. b->shared = NULL;
  219. }
  220. u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
  221. {
  222. u32 *reg;
  223. if (offset < VGIC_NR_PRIVATE_IRQS) {
  224. reg = x->private;
  225. offset += cpuid * VGIC_NR_PRIVATE_IRQS;
  226. } else {
  227. reg = x->shared;
  228. offset -= VGIC_NR_PRIVATE_IRQS;
  229. }
  230. return reg + (offset / sizeof(u32));
  231. }
  232. #define VGIC_CFG_LEVEL 0
  233. #define VGIC_CFG_EDGE 1
  234. static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
  235. {
  236. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  237. int irq_val;
  238. irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
  239. return irq_val == VGIC_CFG_EDGE;
  240. }
  241. static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
  242. {
  243. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  244. return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
  245. }
  246. static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
  247. {
  248. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  249. return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
  250. }
  251. static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
  252. {
  253. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  254. return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
  255. }
  256. static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
  257. {
  258. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  259. vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
  260. }
  261. static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
  262. {
  263. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  264. vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
  265. }
  266. static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
  267. {
  268. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  269. vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
  270. }
  271. static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
  272. {
  273. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  274. vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
  275. }
  276. static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
  277. {
  278. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  279. return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
  280. }
  281. static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
  282. {
  283. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  284. vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
  285. }
  286. static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
  287. {
  288. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  289. vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
  290. }
  291. static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
  292. {
  293. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  294. return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
  295. }
  296. static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
  297. {
  298. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  299. vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
  300. }
  301. static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
  302. {
  303. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  304. return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
  305. }
  306. void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
  307. {
  308. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  309. vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
  310. }
  311. void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
  312. {
  313. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  314. vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
  315. }
  316. static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
  317. {
  318. if (irq < VGIC_NR_PRIVATE_IRQS)
  319. set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
  320. else
  321. set_bit(irq - VGIC_NR_PRIVATE_IRQS,
  322. vcpu->arch.vgic_cpu.pending_shared);
  323. }
  324. void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
  325. {
  326. if (irq < VGIC_NR_PRIVATE_IRQS)
  327. clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
  328. else
  329. clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
  330. vcpu->arch.vgic_cpu.pending_shared);
  331. }
  332. static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
  333. {
  334. return !vgic_irq_is_queued(vcpu, irq);
  335. }
  336. /**
  337. * vgic_reg_access - access vgic register
  338. * @mmio: pointer to the data describing the mmio access
  339. * @reg: pointer to the virtual backing of vgic distributor data
  340. * @offset: least significant 2 bits used for word offset
  341. * @mode: ACCESS_ mode (see defines above)
  342. *
  343. * Helper to make vgic register access easier using one of the access
  344. * modes defined for vgic register access
  345. * (read,raz,write-ignored,setbit,clearbit,write)
  346. */
  347. void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
  348. phys_addr_t offset, int mode)
  349. {
  350. int word_offset = (offset & 3) * 8;
  351. u32 mask = (1UL << (mmio->len * 8)) - 1;
  352. u32 regval;
  353. /*
  354. * Any alignment fault should have been delivered to the guest
  355. * directly (ARM ARM B3.12.7 "Prioritization of aborts").
  356. */
  357. if (reg) {
  358. regval = *reg;
  359. } else {
  360. BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
  361. regval = 0;
  362. }
  363. if (mmio->is_write) {
  364. u32 data = mmio_data_read(mmio, mask) << word_offset;
  365. switch (ACCESS_WRITE_MASK(mode)) {
  366. case ACCESS_WRITE_IGNORED:
  367. return;
  368. case ACCESS_WRITE_SETBIT:
  369. regval |= data;
  370. break;
  371. case ACCESS_WRITE_CLEARBIT:
  372. regval &= ~data;
  373. break;
  374. case ACCESS_WRITE_VALUE:
  375. regval = (regval & ~(mask << word_offset)) | data;
  376. break;
  377. }
  378. *reg = regval;
  379. } else {
  380. switch (ACCESS_READ_MASK(mode)) {
  381. case ACCESS_READ_RAZ:
  382. regval = 0;
  383. /* fall through */
  384. case ACCESS_READ_VALUE:
  385. mmio_data_write(mmio, mask, regval >> word_offset);
  386. }
  387. }
  388. }
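/*
 * Example: a one-byte guest write to byte offset 3 of a register
 * accessed with ACCESS_WRITE_VALUE gives word_offset = 24 and
 * mask = 0xff, so only bits 31:24 of *reg are replaced and the other
 * bytes are preserved.
 */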
  389. bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
  390. phys_addr_t offset)
  391. {
  392. vgic_reg_access(mmio, NULL, offset,
  393. ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
  394. return false;
  395. }
  396. bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
  397. phys_addr_t offset, int vcpu_id, int access)
  398. {
  399. u32 *reg;
  400. int mode = ACCESS_READ_VALUE | access;
  401. struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
  402. reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
  403. vgic_reg_access(mmio, reg, offset, mode);
  404. if (mmio->is_write) {
  405. if (access & ACCESS_WRITE_CLEARBIT) {
  406. if (offset < 4) /* Force SGI enabled */
  407. *reg |= 0xffff;
  408. vgic_retire_disabled_irqs(target_vcpu);
  409. }
  410. vgic_update_state(kvm);
  411. return true;
  412. }
  413. return false;
  414. }
  415. bool vgic_handle_set_pending_reg(struct kvm *kvm,
  416. struct kvm_exit_mmio *mmio,
  417. phys_addr_t offset, int vcpu_id)
  418. {
  419. u32 *reg, orig;
  420. u32 level_mask;
  421. int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
  422. struct vgic_dist *dist = &kvm->arch.vgic;
  423. reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
  424. level_mask = (~(*reg));
  425. /* Mark both level and edge triggered irqs as pending */
  426. reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
  427. orig = *reg;
  428. vgic_reg_access(mmio, reg, offset, mode);
  429. if (mmio->is_write) {
  430. /* Set the soft-pending flag only for level-triggered irqs */
  431. reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
  432. vcpu_id, offset);
  433. vgic_reg_access(mmio, reg, offset, mode);
  434. *reg &= level_mask;
  435. /* Ignore writes to SGIs */
  436. if (offset < 2) {
  437. *reg &= ~0xffff;
  438. *reg |= orig & 0xffff;
  439. }
  440. vgic_update_state(kvm);
  441. return true;
  442. }
  443. return false;
  444. }
  445. bool vgic_handle_clear_pending_reg(struct kvm *kvm,
  446. struct kvm_exit_mmio *mmio,
  447. phys_addr_t offset, int vcpu_id)
  448. {
  449. u32 *level_active;
  450. u32 *reg, orig;
  451. int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
  452. struct vgic_dist *dist = &kvm->arch.vgic;
  453. reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
  454. orig = *reg;
  455. vgic_reg_access(mmio, reg, offset, mode);
  456. if (mmio->is_write) {
  457. /* Re-set level triggered level-active interrupts */
  458. level_active = vgic_bitmap_get_reg(&dist->irq_level,
  459. vcpu_id, offset);
  460. reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
  461. *reg |= *level_active;
  462. /* Ignore writes to SGIs */
  463. if (offset < 2) {
  464. *reg &= ~0xffff;
  465. *reg |= orig & 0xffff;
  466. }
  467. /* Clear soft-pending flags */
  468. reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
  469. vcpu_id, offset);
  470. vgic_reg_access(mmio, reg, offset, mode);
  471. vgic_update_state(kvm);
  472. return true;
  473. }
  474. return false;
  475. }
  476. bool vgic_handle_set_active_reg(struct kvm *kvm,
  477. struct kvm_exit_mmio *mmio,
  478. phys_addr_t offset, int vcpu_id)
  479. {
  480. u32 *reg;
  481. struct vgic_dist *dist = &kvm->arch.vgic;
  482. reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
  483. vgic_reg_access(mmio, reg, offset,
  484. ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
  485. if (mmio->is_write) {
  486. vgic_update_state(kvm);
  487. return true;
  488. }
  489. return false;
  490. }
  491. bool vgic_handle_clear_active_reg(struct kvm *kvm,
  492. struct kvm_exit_mmio *mmio,
  493. phys_addr_t offset, int vcpu_id)
  494. {
  495. u32 *reg;
  496. struct vgic_dist *dist = &kvm->arch.vgic;
  497. reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
  498. vgic_reg_access(mmio, reg, offset,
  499. ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
  500. if (mmio->is_write) {
  501. vgic_update_state(kvm);
  502. return true;
  503. }
  504. return false;
  505. }
  506. static u32 vgic_cfg_expand(u16 val)
  507. {
  508. u32 res = 0;
  509. int i;
  510. /*
  511. * Turn a 16bit value like abcd...mnop into a 32bit word
  512. * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
  513. */
  514. for (i = 0; i < 16; i++)
  515. res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
  516. return res;
  517. }
  518. static u16 vgic_cfg_compress(u32 val)
  519. {
  520. u16 res = 0;
  521. int i;
  522. /*
  523. * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
  524. * abcd...mnop which is what we really care about.
  525. */
  526. for (i = 0; i < 16; i++)
  527. res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
  528. return res;
  529. }
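/*
 * Worked example for the two helpers above: vgic_cfg_expand(0x0003)
 * returns 0x0000000a (bits 1 and 3 set, i.e. IRQs 0 and 1 marked as
 * edge-triggered in the 2-bits-per-IRQ layout), and
 * vgic_cfg_compress(0x0000000a) returns 0x0003 again.
 */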
  530. /*
  531. * The distributor uses 2 bits per IRQ for the CFG register, but the
  532. * LSB is always 0. As such, we only keep the upper bit, and use the
  533. * two above functions to compress/expand the bits
  534. */
  535. bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
  536. phys_addr_t offset)
  537. {
  538. u32 val;
  539. if (offset & 4)
  540. val = *reg >> 16;
  541. else
  542. val = *reg & 0xffff;
  543. val = vgic_cfg_expand(val);
  544. vgic_reg_access(mmio, &val, offset,
  545. ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
  546. if (mmio->is_write) {
  547. if (offset < 8) {
  548. *reg = ~0U; /* Force PPIs/SGIs to 1 */
  549. return false;
  550. }
  551. val = vgic_cfg_compress(val);
  552. if (offset & 4) {
  553. *reg &= 0xffff;
  554. *reg |= val << 16;
  555. } else {
  556. *reg &= 0xffff << 16;
  557. *reg |= val;
  558. }
  559. }
  560. return false;
  561. }
  562. /**
  563. * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
  564. * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
  565. *
  566. * Move any IRQs that have already been assigned to LRs back to the
  567. * emulated distributor state so that the complete emulated state can be read
  568. * from the main emulation structures without investigating the LRs.
  569. */
  570. void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
  571. {
  572. struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  573. int i;
  574. for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
  575. struct vgic_lr lr = vgic_get_lr(vcpu, i);
  576. /*
  577. * There are three options for the state bits:
  578. *
  579. * 01: pending
  580. * 10: active
  581. * 11: pending and active
  582. */
  583. BUG_ON(!(lr.state & LR_STATE_MASK));
  584. /* Reestablish SGI source for pending and active IRQs */
  585. if (lr.irq < VGIC_NR_SGIS)
  586. add_sgi_source(vcpu, lr.irq, lr.source);
  587. /*
  588. * If the LR holds an active (10) or a pending and active (11)
  589. * interrupt then move the active state to the
  590. * distributor tracking bit.
  591. */
  592. if (lr.state & LR_STATE_ACTIVE) {
  593. vgic_irq_set_active(vcpu, lr.irq);
  594. lr.state &= ~LR_STATE_ACTIVE;
  595. }
  596. /*
  597. * Reestablish the pending state on the distributor and the
  598. * CPU interface. It may have already been pending, but that
  599. * is fine, then we are only setting a few bits that were
  600. * already set.
  601. */
  602. if (lr.state & LR_STATE_PENDING) {
  603. vgic_dist_irq_set_pending(vcpu, lr.irq);
  604. lr.state &= ~LR_STATE_PENDING;
  605. }
  606. vgic_set_lr(vcpu, i, lr);
  607. /*
  608. * Mark the LR as free for other use.
  609. */
  610. BUG_ON(lr.state & LR_STATE_MASK);
  611. vgic_retire_lr(i, lr.irq, vcpu);
  612. vgic_irq_clear_queued(vcpu, lr.irq);
  613. /* Finally update the VGIC state. */
  614. vgic_update_state(vcpu->kvm);
  615. }
  616. }
  617. const
  618. struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
  619. int len, gpa_t offset)
  620. {
  621. while (ranges->len) {
  622. if (offset >= ranges->base &&
  623. (offset + len) <= (ranges->base + ranges->len))
  624. return ranges;
  625. ranges++;
  626. }
  627. return NULL;
  628. }
  629. static bool vgic_validate_access(const struct vgic_dist *dist,
  630. const struct vgic_io_range *range,
  631. unsigned long offset)
  632. {
  633. int irq;
  634. if (!range->bits_per_irq)
  635. return true; /* Not an irq-based access */
  636. irq = offset * 8 / range->bits_per_irq;
  637. if (irq >= dist->nr_irqs)
  638. return false;
  639. return true;
  640. }
  641. /*
  642. * Call the respective handler function for the given range.
  643. * We split up any 64 bit accesses into two consecutive 32 bit
  644. * handler calls and merge the result afterwards.
  645. * We do this in a little endian fashion regardless of the host's
  646. * or guest's endianness, because the GIC is always LE and the rest of
  647. * the code (vgic_reg_access) also puts it in a LE fashion already.
  648. * At this point we have already identified the handle function, so
  649. * range points to that one entry and offset is relative to this.
  650. */
  651. static bool call_range_handler(struct kvm_vcpu *vcpu,
  652. struct kvm_exit_mmio *mmio,
  653. unsigned long offset,
  654. const struct vgic_io_range *range)
  655. {
  656. struct kvm_exit_mmio mmio32;
  657. bool ret;
  658. if (likely(mmio->len <= 4))
  659. return range->handle_mmio(vcpu, mmio, offset);
  660. /*
  661. * Any access bigger than 4 bytes (that we currently handle in KVM)
  662. * is actually 8 bytes long, caused by a 64-bit access
  663. */
  664. mmio32.len = 4;
  665. mmio32.is_write = mmio->is_write;
  666. mmio32.private = mmio->private;
  667. mmio32.phys_addr = mmio->phys_addr + 4;
  668. mmio32.data = &((u32 *)mmio->data)[1];
  669. ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
  670. mmio32.phys_addr = mmio->phys_addr;
  671. mmio32.data = &((u32 *)mmio->data)[0];
  672. ret |= range->handle_mmio(vcpu, &mmio32, offset);
  673. return ret;
  674. }
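/*
 * Example: an 8-byte access at offset 0x8 within the matched range is
 * split into two 4-byte calls: first for data word 1 at offset 0xc,
 * then for data word 0 at offset 0x8, matching the little-endian layout
 * described above.
 */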
  675. /**
  676. * vgic_handle_mmio_access - handle an in-kernel MMIO access
  677. * This is called by the read/write KVM IO device wrappers below.
  678. * @vcpu: pointer to the vcpu performing the access
  679. * @this: pointer to the KVM IO device in charge
  680. * @addr: guest physical address of the access
  681. * @len: size of the access
  682. * @val: pointer to the data region
  683. * @is_write: read or write access
  684. *
  685. * returns true if the MMIO access could be performed
  686. */
  687. static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
  688. struct kvm_io_device *this, gpa_t addr,
  689. int len, void *val, bool is_write)
  690. {
  691. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  692. struct vgic_io_device *iodev = container_of(this,
  693. struct vgic_io_device, dev);
  694. struct kvm_run *run = vcpu->run;
  695. const struct vgic_io_range *range;
  696. struct kvm_exit_mmio mmio;
  697. bool updated_state;
  698. gpa_t offset;
  699. offset = addr - iodev->addr;
  700. range = vgic_find_range(iodev->reg_ranges, len, offset);
  701. if (unlikely(!range || !range->handle_mmio)) {
  702. pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
  703. return -ENXIO;
  704. }
  705. mmio.phys_addr = addr;
  706. mmio.len = len;
  707. mmio.is_write = is_write;
  708. mmio.data = val;
  709. mmio.private = iodev->redist_vcpu;
  710. spin_lock(&dist->lock);
  711. offset -= range->base;
  712. if (vgic_validate_access(dist, range, offset)) {
  713. updated_state = call_range_handler(vcpu, &mmio, offset, range);
  714. } else {
  715. if (!is_write)
  716. memset(val, 0, len);
  717. updated_state = false;
  718. }
  719. spin_unlock(&dist->lock);
  720. run->mmio.is_write = is_write;
  721. run->mmio.len = len;
  722. run->mmio.phys_addr = addr;
  723. memcpy(run->mmio.data, val, len);
  724. kvm_handle_mmio_return(vcpu, run);
  725. if (updated_state)
  726. vgic_kick_vcpus(vcpu->kvm);
  727. return 0;
  728. }
  729. static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
  730. struct kvm_io_device *this,
  731. gpa_t addr, int len, void *val)
  732. {
  733. return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
  734. }
  735. static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
  736. struct kvm_io_device *this,
  737. gpa_t addr, int len, const void *val)
  738. {
  739. return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
  740. true);
  741. }
  742. struct kvm_io_device_ops vgic_io_ops = {
  743. .read = vgic_handle_mmio_read,
  744. .write = vgic_handle_mmio_write,
  745. };
  746. /**
  747. * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
  748. * @kvm: The VM structure pointer
  749. * @base: The (guest) base address for the register frame
  750. * @len: Length of the register frame window
  751. * @ranges: Describing the handler functions for each register
  752. * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
  753. * @iodev: Points to memory to be passed on to the handler
  754. *
  755. * @iodev stores the parameters of this function so that they can be used by the
  756. * handler and the dispatcher function (since the KVM I/O bus framework lacks
  757. * an opaque parameter). Initialization is done in this function; the
  758. * reference must stay valid and unique for the whole VGIC lifetime.
  759. * If the register frame is not mapped for a specific VCPU, pass -1 to
  760. * @redist_vcpu_id.
  761. */
  762. int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
  763. const struct vgic_io_range *ranges,
  764. int redist_vcpu_id,
  765. struct vgic_io_device *iodev)
  766. {
  767. struct kvm_vcpu *vcpu = NULL;
  768. int ret;
  769. if (redist_vcpu_id >= 0)
  770. vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);
  771. iodev->addr = base;
  772. iodev->len = len;
  773. iodev->reg_ranges = ranges;
  774. iodev->redist_vcpu = vcpu;
  775. kvm_iodevice_init(&iodev->dev, &vgic_io_ops);
  776. mutex_lock(&kvm->slots_lock);
  777. ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
  778. &iodev->dev);
  779. mutex_unlock(&kvm->slots_lock);
  780. /* Mark the iodev as invalid if registration fails. */
  781. if (ret)
  782. iodev->dev.ops = NULL;
  783. return ret;
  784. }
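/*
 * Usage note (illustrative): a register frame shared by all VCPUs, such
 * as the distributor, is registered with redist_vcpu_id == -1, whereas a
 * per-VCPU frame passes the owning VCPU's id so the handlers can tell
 * the frames apart via mmio->private.
 */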
  785. static int vgic_nr_shared_irqs(struct vgic_dist *dist)
  786. {
  787. return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
  788. }
  789. static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
  790. {
  791. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  792. unsigned long *active, *enabled, *act_percpu, *act_shared;
  793. unsigned long active_private, active_shared;
  794. int nr_shared = vgic_nr_shared_irqs(dist);
  795. int vcpu_id;
  796. vcpu_id = vcpu->vcpu_id;
  797. act_percpu = vcpu->arch.vgic_cpu.active_percpu;
  798. act_shared = vcpu->arch.vgic_cpu.active_shared;
  799. active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
  800. enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
  801. bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);
  802. active = vgic_bitmap_get_shared_map(&dist->irq_active);
  803. enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
  804. bitmap_and(act_shared, active, enabled, nr_shared);
  805. bitmap_and(act_shared, act_shared,
  806. vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
  807. nr_shared);
  808. active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
  809. active_shared = find_first_bit(act_shared, nr_shared);
  810. return (active_private < VGIC_NR_PRIVATE_IRQS ||
  811. active_shared < nr_shared);
  812. }
  813. static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
  814. {
  815. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  816. unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
  817. unsigned long pending_private, pending_shared;
  818. int nr_shared = vgic_nr_shared_irqs(dist);
  819. int vcpu_id;
  820. vcpu_id = vcpu->vcpu_id;
  821. pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
  822. pend_shared = vcpu->arch.vgic_cpu.pending_shared;
  823. pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
  824. enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
  825. bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
  826. pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
  827. enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
  828. bitmap_and(pend_shared, pending, enabled, nr_shared);
  829. bitmap_and(pend_shared, pend_shared,
  830. vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
  831. nr_shared);
  832. pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
  833. pending_shared = find_first_bit(pend_shared, nr_shared);
  834. return (pending_private < VGIC_NR_PRIVATE_IRQS ||
  835. pending_shared < vgic_nr_shared_irqs(dist));
  836. }
  837. /*
  838. * Update the interrupt state and determine which CPUs have pending
  839. * or active interrupts. Must be called with distributor lock held.
  840. */
  841. void vgic_update_state(struct kvm *kvm)
  842. {
  843. struct vgic_dist *dist = &kvm->arch.vgic;
  844. struct kvm_vcpu *vcpu;
  845. int c;
  846. if (!dist->enabled) {
  847. set_bit(0, dist->irq_pending_on_cpu);
  848. return;
  849. }
  850. kvm_for_each_vcpu(c, vcpu, kvm) {
  851. if (compute_pending_for_cpu(vcpu))
  852. set_bit(c, dist->irq_pending_on_cpu);
  853. if (compute_active_for_cpu(vcpu))
  854. set_bit(c, dist->irq_active_on_cpu);
  855. else
  856. clear_bit(c, dist->irq_active_on_cpu);
  857. }
  858. }
  859. static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
  860. {
  861. return vgic_ops->get_lr(vcpu, lr);
  862. }
  863. static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
  864. struct vgic_lr vlr)
  865. {
  866. vgic_ops->set_lr(vcpu, lr, vlr);
  867. }
  868. static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
  869. struct vgic_lr vlr)
  870. {
  871. vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
  872. }
  873. static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
  874. {
  875. return vgic_ops->get_elrsr(vcpu);
  876. }
  877. static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
  878. {
  879. return vgic_ops->get_eisr(vcpu);
  880. }
  881. static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
  882. {
  883. vgic_ops->clear_eisr(vcpu);
  884. }
  885. static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
  886. {
  887. return vgic_ops->get_interrupt_status(vcpu);
  888. }
  889. static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
  890. {
  891. vgic_ops->enable_underflow(vcpu);
  892. }
  893. static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
  894. {
  895. vgic_ops->disable_underflow(vcpu);
  896. }
  897. void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
  898. {
  899. vgic_ops->get_vmcr(vcpu, vmcr);
  900. }
  901. void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
  902. {
  903. vgic_ops->set_vmcr(vcpu, vmcr);
  904. }
  905. static inline void vgic_enable(struct kvm_vcpu *vcpu)
  906. {
  907. vgic_ops->enable(vcpu);
  908. }
  909. static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
  910. {
  911. struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  912. struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
  913. vlr.state = 0;
  914. vgic_set_lr(vcpu, lr_nr, vlr);
  915. clear_bit(lr_nr, vgic_cpu->lr_used);
  916. vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
  917. vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
  918. }
  919. /*
  920. * An interrupt may have been disabled after being made pending on the
  921. * CPU interface (the classic case is a timer running while we're
  922. * rebooting the guest - the interrupt would kick as soon as the CPU
  923. * interface gets enabled, with deadly consequences).
  924. *
  925. * The solution is to examine already active LRs, and check the
  926. * interrupt is still enabled. If not, just retire it.
  927. */
  928. static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
  929. {
  930. struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  931. int lr;
  932. for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
  933. struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
  934. if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
  935. vgic_retire_lr(lr, vlr.irq, vcpu);
  936. if (vgic_irq_is_queued(vcpu, vlr.irq))
  937. vgic_irq_clear_queued(vcpu, vlr.irq);
  938. }
  939. }
  940. }
  941. static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
  942. int lr_nr, struct vgic_lr vlr)
  943. {
  944. if (vgic_irq_is_active(vcpu, irq)) {
  945. vlr.state |= LR_STATE_ACTIVE;
  946. kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
  947. vgic_irq_clear_active(vcpu, irq);
  948. vgic_update_state(vcpu->kvm);
  949. } else if (vgic_dist_irq_is_pending(vcpu, irq)) {
  950. vlr.state |= LR_STATE_PENDING;
  951. kvm_debug("Set pending: 0x%x\n", vlr.state);
  952. }
  953. if (!vgic_irq_is_edge(vcpu, irq))
  954. vlr.state |= LR_EOI_INT;
  955. if (vlr.irq >= VGIC_NR_SGIS) {
  956. struct irq_phys_map *map;
  957. map = vgic_irq_map_search(vcpu, irq);
  958. /*
  959. * If we have a mapping, and the virtual interrupt is
  960. * being injected, then we must set the state to
  961. * active in the physical world. Otherwise the
  962. * physical interrupt will fire and the guest will
  963. * exit before processing the virtual interrupt.
  964. */
  965. if (map) {
  966. int ret;
  967. BUG_ON(!map->active);
  968. vlr.hwirq = map->phys_irq;
  969. vlr.state |= LR_HW;
  970. vlr.state &= ~LR_EOI_INT;
  971. ret = irq_set_irqchip_state(map->irq,
  972. IRQCHIP_STATE_ACTIVE,
  973. true);
  974. WARN_ON(ret);
  975. /*
  976. * Make sure we're not going to sample this
  977. * again, as a HW-backed interrupt cannot be
  978. * in the PENDING_ACTIVE stage.
  979. */
  980. vgic_irq_set_queued(vcpu, irq);
  981. }
  982. }
  983. vgic_set_lr(vcpu, lr_nr, vlr);
  984. vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
  985. }
  986. /*
  987. * Queue an interrupt to a CPU virtual interface. Return true on success,
  988. * or false if it wasn't possible to queue it.
  989. * sgi_source must be zero for any non-SGI interrupts.
  990. */
  991. bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
  992. {
  993. struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  994. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  995. struct vgic_lr vlr;
  996. int lr;
  997. /* Sanitize the input... */
  998. BUG_ON(sgi_source_id & ~7);
  999. BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
  1000. BUG_ON(irq >= dist->nr_irqs);
  1001. kvm_debug("Queue IRQ%d\n", irq);
  1002. lr = vgic_cpu->vgic_irq_lr_map[irq];
  1003. /* Do we have an active interrupt for the same CPUID? */
  1004. if (lr != LR_EMPTY) {
  1005. vlr = vgic_get_lr(vcpu, lr);
  1006. if (vlr.source == sgi_source_id) {
  1007. kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
  1008. BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
  1009. vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
  1010. return true;
  1011. }
  1012. }
  1013. /* Try to use another LR for this interrupt */
  1014. lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
  1015. vgic->nr_lr);
  1016. if (lr >= vgic->nr_lr)
  1017. return false;
  1018. kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
  1019. vgic_cpu->vgic_irq_lr_map[irq] = lr;
  1020. set_bit(lr, vgic_cpu->lr_used);
  1021. vlr.irq = irq;
  1022. vlr.source = sgi_source_id;
  1023. vlr.state = 0;
  1024. vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
  1025. return true;
  1026. }
  1027. static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
  1028. {
  1029. if (!vgic_can_sample_irq(vcpu, irq))
  1030. return true; /* level interrupt, already queued */
  1031. if (vgic_queue_irq(vcpu, 0, irq)) {
  1032. if (vgic_irq_is_edge(vcpu, irq)) {
  1033. vgic_dist_irq_clear_pending(vcpu, irq);
  1034. vgic_cpu_irq_clear(vcpu, irq);
  1035. } else {
  1036. vgic_irq_set_queued(vcpu, irq);
  1037. }
  1038. return true;
  1039. }
  1040. return false;
  1041. }
  1042. /*
  1043. * Fill the list registers with pending interrupts before running the
  1044. * guest.
  1045. */
  1046. static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
  1047. {
  1048. struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  1049. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  1050. unsigned long *pa_percpu, *pa_shared;
  1051. int i, vcpu_id;
  1052. int overflow = 0;
  1053. int nr_shared = vgic_nr_shared_irqs(dist);
  1054. vcpu_id = vcpu->vcpu_id;
  1055. pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
  1056. pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;
  1057. bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
  1058. VGIC_NR_PRIVATE_IRQS);
  1059. bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
  1060. nr_shared);
  1061. /*
  1062. * We may not have any pending interrupt, or the interrupts
  1063. * may have been serviced from another vcpu. In all cases,
  1064. * move along.
  1065. */
  1066. if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
  1067. goto epilog;
  1068. /* SGIs */
  1069. for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
  1070. if (!queue_sgi(vcpu, i))
  1071. overflow = 1;
  1072. }
  1073. /* PPIs */
  1074. for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
  1075. if (!vgic_queue_hwirq(vcpu, i))
  1076. overflow = 1;
  1077. }
  1078. /* SPIs */
  1079. for_each_set_bit(i, pa_shared, nr_shared) {
  1080. if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
  1081. overflow = 1;
  1082. }
  1083. epilog:
  1084. if (overflow) {
  1085. vgic_enable_underflow(vcpu);
  1086. } else {
  1087. vgic_disable_underflow(vcpu);
  1088. /*
  1089. * We're about to run this VCPU, and we've consumed
  1090. * everything the distributor had in store for
  1091. * us. Claim we don't have anything pending. We'll
  1092. * adjust that if needed while exiting.
  1093. */
  1094. clear_bit(vcpu_id, dist->irq_pending_on_cpu);
  1095. }
  1096. }
  1097. static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
  1098. {
  1099. u32 status = vgic_get_interrupt_status(vcpu);
  1100. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  1101. bool level_pending = false;
  1102. struct kvm *kvm = vcpu->kvm;
  1103. kvm_debug("STATUS = %08x\n", status);
  1104. if (status & INT_STATUS_EOI) {
  1105. /*
  1106. * Some level interrupts have been EOIed. Clear their
  1107. * active bit.
  1108. */
  1109. u64 eisr = vgic_get_eisr(vcpu);
  1110. unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
  1111. int lr;
  1112. for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
  1113. struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
  1114. WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
  1115. spin_lock(&dist->lock);
  1116. vgic_irq_clear_queued(vcpu, vlr.irq);
  1117. WARN_ON(vlr.state & LR_STATE_MASK);
  1118. vlr.state = 0;
  1119. vgic_set_lr(vcpu, lr, vlr);
  1120. /*
  1121. * If the IRQ was EOIed it was also ACKed and we
  1122. * therefore assume we can clear the soft pending
  1123. * state (should it have been set) for this interrupt.
  1124. *
  1125. * Note: if the IRQ soft pending state was set after
  1126. * the IRQ was acked, it actually shouldn't be
  1127. * cleared, but we have no way of knowing that unless
  1128. * we start trapping ACKs when the soft-pending state
  1129. * is set.
  1130. */
  1131. vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
  1132. /*
  1133. * kvm_notify_acked_irq calls kvm_set_irq()
  1134. * to reset the IRQ level. Need to release the
  1135. * lock for kvm_set_irq to grab it.
  1136. */
  1137. spin_unlock(&dist->lock);
  1138. kvm_notify_acked_irq(kvm, 0,
  1139. vlr.irq - VGIC_NR_PRIVATE_IRQS);
  1140. spin_lock(&dist->lock);
  1141. /* Any additional pending interrupt? */
  1142. if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
  1143. vgic_cpu_irq_set(vcpu, vlr.irq);
  1144. level_pending = true;
  1145. } else {
  1146. vgic_dist_irq_clear_pending(vcpu, vlr.irq);
  1147. vgic_cpu_irq_clear(vcpu, vlr.irq);
  1148. }
  1149. spin_unlock(&dist->lock);
  1150. /*
  1151. * Despite being EOIed, the LR may not have
  1152. * been marked as empty.
  1153. */
  1154. vgic_sync_lr_elrsr(vcpu, lr, vlr);
  1155. }
  1156. }
  1157. if (status & INT_STATUS_UNDERFLOW)
  1158. vgic_disable_underflow(vcpu);
  1159. /*
  1160. * In the next iterations of the vcpu loop, if we sync the vgic state
  1161. * after flushing it, but before entering the guest (this happens for
  1162. * pending signals and vmid rollovers), then make sure we don't pick
  1163. * up any old maintenance interrupts here.
  1164. */
  1165. vgic_clear_eisr(vcpu);
  1166. return level_pending;
  1167. }
  1168. /*
  1169. * Save the physical active state, and reset it to inactive.
  1170. *
  1171. * Return 1 if HW interrupt went from active to inactive, and 0 otherwise.
  1172. */
  1173. static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
  1174. {
  1175. struct irq_phys_map *map;
  1176. int ret;
  1177. if (!(vlr.state & LR_HW))
  1178. return 0;
  1179. map = vgic_irq_map_search(vcpu, vlr.irq);
  1180. BUG_ON(!map || !map->active);
  1181. ret = irq_get_irqchip_state(map->irq,
  1182. IRQCHIP_STATE_ACTIVE,
  1183. &map->active);
  1184. WARN_ON(ret);
  1185. if (map->active) {
  1186. ret = irq_set_irqchip_state(map->irq,
  1187. IRQCHIP_STATE_ACTIVE,
  1188. false);
  1189. WARN_ON(ret);
  1190. return 0;
  1191. }
  1192. return 1;
  1193. }
  1194. /* Sync back the VGIC state after a guest run */
  1195. static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
  1196. {
  1197. struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  1198. struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  1199. u64 elrsr;
  1200. unsigned long *elrsr_ptr;
  1201. int lr, pending;
  1202. bool level_pending;
  1203. level_pending = vgic_process_maintenance(vcpu);
  1204. elrsr = vgic_get_elrsr(vcpu);
  1205. elrsr_ptr = u64_to_bitmask(&elrsr);
  1206. /* Deal with HW interrupts, and clear mappings for empty LRs */
  1207. for (lr = 0; lr < vgic->nr_lr; lr++) {
  1208. struct vgic_lr vlr;
  1209. if (!test_bit(lr, vgic_cpu->lr_used))
  1210. continue;
  1211. vlr = vgic_get_lr(vcpu, lr);
  1212. if (vgic_sync_hwirq(vcpu, vlr)) {
  1213. /*
  1214. * So this is a HW interrupt that the guest
  1215. * EOI-ed. Clean the LR state and allow the
  1216. * interrupt to be sampled again.
  1217. */
  1218. vlr.state = 0;
  1219. vlr.hwirq = 0;
  1220. vgic_set_lr(vcpu, lr, vlr);
  1221. vgic_irq_clear_queued(vcpu, vlr.irq);
  1222. set_bit(lr, elrsr_ptr);
  1223. }
  1224. if (!test_bit(lr, elrsr_ptr))
  1225. continue;
  1226. clear_bit(lr, vgic_cpu->lr_used);
  1227. BUG_ON(vlr.irq >= dist->nr_irqs);
  1228. vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
  1229. }
  1230. /* Check if we still have something up our sleeve... */
  1231. pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
  1232. if (level_pending || pending < vgic->nr_lr)
  1233. set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
  1234. }

void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	__kvm_vgic_sync_hwstate(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}

int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}
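
/*
 * Illustrative note (not part of the original source): the decision table
 * implemented by vgic_validate_injection() above. "state" is the currently
 * latched pending bit for edge-triggered interrupts, or the last recorded
 * line level for level-sensitive ones.
 *
 *   trigger | state | level | inject?
 *   --------+-------+-------+---------------------------------
 *   edge    |   0   |   1   | yes (rising edge)
 *   edge    |   1   |   1   | no  (already pending)
 *   edge    |   x   |   0   | no  (falling edges are ignored)
 *   level   |   0   |   1   | yes (line went high)
 *   level   |   1   |   0   | yes (line went low)
 *   level   |   s   |   s   | no  (level did not change)
 */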

static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true, can_inject = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		if (cpuid == VCPU_NOT_ALLOCATED) {
			/* Pretend we use CPU0, and prevent injection */
			cpuid = 0;
			can_inject = false;
		}
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		}

		ret = false;
		goto out;
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled || !can_inject) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret ? cpuid : -EINVAL;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *                            false: to ignore the call
 *           Level-sensitive  true:  activates an interrupt
 *                            false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	int ret = 0;
	int vcpu_id;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must
		 * be explicitly initialized once setup with the respective
		 * KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
			ret = -EBUSY;
			goto out;
		}
		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);

		if (ret)
			goto out;
	}

	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
		return -EINVAL;

	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
	if (vcpu_id >= 0) {
		/* kick the specified vcpu */
		kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
	}

out:
	return ret;
}
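
/*
 * Illustrative sketch only (not part of the original source): how an
 * in-kernel caller might drive a level-sensitive SPI through
 * kvm_vgic_inject_irq(). The helper name and the SPI number below are
 * hypothetical; the point is that level-sensitive callers report the
 * current line *level*, while edge-triggered callers pass true per event.
 */
#if 0	/* example, not built as part of this file */
static int example_set_device_line(struct kvm *kvm, bool asserted)
{
	/* Hypothetical SPI number: the first shared interrupt (ID 32). */
	const unsigned int example_spi = VGIC_NR_PRIVATE_IRQS;

	/*
	 * cpuid only matters for private interrupts; for SPIs the
	 * distributor routing (irq_spi_cpu[]) picks the target VCPU,
	 * so passing 0 here is fine.
	 */
	return kvm_vgic_inject_irq(kvm, 0, example_spi, asserted);
}
#endif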

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
						    int virt_irq)
{
	if (virt_irq < VGIC_NR_PRIVATE_IRQS)
		return &vcpu->arch.vgic_cpu.irq_phys_map_list;
	else
		return &vcpu->kvm->arch.vgic.irq_phys_map_list;
}

/**
 * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
 * @vcpu: The VCPU pointer
 * @virt_irq: The virtual irq number
 * @irq: The Linux IRQ number
 *
 * Establish a mapping between a guest visible irq (@virt_irq) and a
 * Linux irq (@irq). On injection, @virt_irq will be associated with
 * the physical interrupt represented by @irq. This mapping can be
 * established multiple times as long as the parameters are the same.
 *
 * Returns a valid pointer on success, and an error pointer otherwise
 */
struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
					   int virt_irq, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
	struct irq_phys_map *map;
	struct irq_phys_map_entry *entry;
	struct irq_desc *desc;
	struct irq_data *data;
	int phys_irq;

	desc = irq_to_desc(irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	phys_irq = data->hwirq;

	/* Create a new mapping */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	spin_lock(&dist->irq_phys_map_lock);

	/* Try to match an existing mapping */
	map = vgic_irq_map_search(vcpu, virt_irq);
	if (map) {
		/* Make sure this mapping matches */
		if (map->phys_irq != phys_irq ||
		    map->irq != irq)
			map = ERR_PTR(-EINVAL);

		/* Found an existing, valid mapping */
		goto out;
	}

	map = &entry->map;
	map->virt_irq = virt_irq;
	map->phys_irq = phys_irq;
	map->irq = irq;

	list_add_tail_rcu(&entry->entry, root);

out:
	spin_unlock(&dist->irq_phys_map_lock);
	/* If we've found a hit in the existing list, free the useless
	 * entry */
	if (IS_ERR(map) || map != &entry->map)
		kfree(entry);
	return map;
}
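
/*
 * Illustrative sketch only (not part of the original source): pairing
 * kvm_vgic_map_phys_irq() with kvm_vgic_unmap_phys_irq() (defined further
 * below) for a forwarded interrupt. The helper name, the virtual IRQ and
 * the Linux IRQ (host_irq) are hypothetical placeholders for whatever the
 * caller is forwarding.
 */
#if 0	/* example, not built as part of this file */
static int example_map_forwarded_irq(struct kvm_vcpu *vcpu, int virt_irq,
				     int host_irq)
{
	struct irq_phys_map *map;

	/* An error pointer is returned on failure, hence IS_ERR()/PTR_ERR() */
	map = kvm_vgic_map_phys_irq(vcpu, virt_irq, host_irq);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ... run the guest with the mapping in place ... */

	/* Drop the mapping again once the caller is done with it */
	return kvm_vgic_unmap_phys_irq(vcpu, map);
}
#endif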

static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
						int virt_irq)
{
	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
	struct irq_phys_map_entry *entry;
	struct irq_phys_map *map;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, root, entry) {
		map = &entry->map;
		if (map->virt_irq == virt_irq) {
			rcu_read_unlock();
			return map;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
{
	struct irq_phys_map_entry *entry;

	entry = container_of(rcu, struct irq_phys_map_entry, rcu);
	kfree(entry);
}

/**
 * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
 * @vcpu: The VCPU pointer
 * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
 *
 * Remove an existing mapping between virtual and physical interrupts.
 */
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct irq_phys_map_entry *entry;
	struct list_head *root;

	if (!map)
		return -EINVAL;

	root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);

	spin_lock(&dist->irq_phys_map_lock);

	list_for_each_entry(entry, root, entry) {
		if (&entry->map == map) {
			list_del_rcu(&entry->entry);
			call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
			break;
		}
	}

	spin_unlock(&dist->irq_phys_map_lock);

	return 0;
}

static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct irq_phys_map_entry *entry;

	spin_lock(&dist->irq_phys_map_lock);

	list_for_each_entry(entry, root, entry) {
		list_del_rcu(&entry->entry);
		call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
	}

	spin_unlock(&dist->irq_phys_map_lock);
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->active_shared);
	kfree(vgic_cpu->pend_act_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->active_shared = NULL;
	vgic_cpu->pend_act_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}

static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;

	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared
		|| !vgic_cpu->active_shared
		|| !vgic_cpu->pend_act_shared
		|| !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	return 0;
}

/**
 * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
 *
 * No memory allocation should be performed here, only static init.
 */
void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
}

/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum amount of VCPUs a guest
 * can use.
 */
int kvm_vgic_get_max_vcpus(void)
{
	return vgic->max_gic_vcpus;
}

void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_mpidr);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	kfree(dist->irq_active_on_cpu);
	vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
	dist->irq_active_on_cpu = NULL;
	dist->nr_cpus = 0;
}

/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i, vcpu_id;

	if (vgic_initialized(kvm))
		return 0;

	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
	if (!nr_cpus)		/* No vcpus? Can't be good... */
		return -ENODEV;

	/*
	 * If nobody configured the number of interrupts, use the
	 * legacy one.
	 */
	if (!dist->nr_irqs)
		dist->nr_irqs = VGIC_NR_IRQS_LEGACY;

	nr_irqs = dist->nr_irqs;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					  GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu ||
	    !dist->irq_active_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	ret = kvm->arch.vgic.vm_ops.init_model(kvm);
	if (ret)
		goto out;

	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}

		for (i = 0; i < dist->nr_irqs; i++) {
			if (i < VGIC_NR_PPIS)
				vgic_bitmap_set_irq_val(&dist->irq_enabled,
							vcpu->vcpu_id, i, 1);
			if (i < VGIC_NR_PRIVATE_IRQS)
				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_EDGE);
		}

		vgic_enable(vcpu);
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}

static int init_vgic_model(struct kvm *kvm, int type)
{
	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		vgic_v2_init_emulation(kvm);
		break;
#ifdef CONFIG_ARM_GIC_V3
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		vgic_v3_init_emulation(kvm);
		break;
#endif
	default:
		return -ENODEV;
	}

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
		return -E2BIG;

	return 0;
}

/**
 * kvm_vgic_early_init - Earliest possible vgic initialization stage
 *
 * No memory allocation should be performed here, only static init.
 */
void kvm_vgic_early_init(struct kvm *kvm)
{
	spin_lock_init(&kvm->arch.vgic.lock);
	spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
	INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
}

int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (irqchip_in_kernel(kvm)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	ret = init_vgic_model(kvm, type);
	if (ret)
		goto out_unlock;

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int type_needed;
	phys_addr_t *addr_ptr, block_size;
	phys_addr_t alignment;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V2_DIST_SIZE;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_cpu_base;
		block_size = KVM_VGIC_V2_CPU_SIZE;
		alignment = SZ_4K;
		break;
#ifdef CONFIG_ARM_GIC_V3
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V3_DIST_SIZE;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_redist_base;
		block_size = KVM_VGIC_V3_REDIST_SIZE;
		alignment = SZ_64K;
		break;
#endif
	default:
		r = -ENODEV;
		goto out;
	}

	if (vgic->vgic_model != type_needed) {
		r = -ENODEV;
		goto out;
	}

	if (write) {
		if (!IS_ALIGNED(*addr, alignment))
			r = -EINVAL;
		else
			r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
					       block_size);
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}
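
/*
 * Illustrative sketch only (not part of the original source): the userspace
 * side that ends up in kvm_vgic_addr() via vgic_set_common_attr() below.
 * "vgic_fd" is a hypothetical file descriptor returned by KVM_CREATE_DEVICE
 * for a KVM_DEV_TYPE_ARM_VGIC_V2 device, and 0x08000000 is a placeholder
 * guest physical address (it must be 4K aligned for GICv2).
 */
#if 0	/* userspace example, not built as part of this file */
	__u64 dist_base = 0x08000000;
	struct kvm_device_attr attr = {
		.group	= KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr	= KVM_VGIC_V2_ADDR_TYPE_DIST,
		.addr	= (__u64)(unsigned long)&dist_base,
	};

	if (ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr))
		perror("KVM_SET_DEVICE_ATTR");
#endif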

int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_IRQS ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_irqs = val;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			r = vgic_init(dev->kvm);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
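
/*
 * Illustrative sketch only (not part of the original source): setting the
 * number of interrupts through KVM_DEV_ARM_VGIC_GRP_NR_IRQS before the vgic
 * is initialized. Per the checks above, the value must be a multiple of 32,
 * at least 64 (16 SGIs + 16 PPIs + 32 SPIs) and at most VGIC_MAX_IRQS.
 * "vgic_fd" is again a hypothetical device file descriptor.
 */
#if 0	/* userspace example, not built as part of this file */
	__u32 nr_irqs = 128;	/* 96 SPIs on top of the 32 private IRQs */
	struct kvm_device_attr attr = {
		.group	= KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
		.addr	= (__u64)(unsigned long)&nr_irqs,
	};

	if (ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr))
		perror("KVM_SET_DEVICE_ATTR");
#endif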

int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
		break;
	}
	}

	return r;
}

int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
{
	if (vgic_find_range(ranges, 4, offset))
		return 0;
	else
		return -ENXIO;
}

static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};

static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,cortex-a7-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-400",		.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3",		.data = vgic_v3_probe, },
	{},
};

int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;

	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return 0;

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries,
		    int gsi)
{
	return 0;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id,
		u32 irq, int level, bool line_status)
{
	unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;

	trace_kvm_set_irq(irq, level, irq_source_id);

	BUG_ON(!vgic_initialized(kvm));

	return kvm_vgic_inject_irq(kvm, 0, spi, level);
}

/* MSI not implemented yet */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id,
		int level, bool line_status)
{
	return 0;
}