sys_regs.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528
  1. /*
  2. * Copyright (C) 2012,2013 - ARM Ltd
  3. * Author: Marc Zyngier <marc.zyngier@arm.com>
  4. *
  5. * Derived from arch/arm/kvm/coproc.c:
  6. * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  7. * Authors: Rusty Russell <rusty@rustcorp.com.au>
  8. * Christoffer Dall <c.dall@virtualopensystems.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License, version 2, as
  12. * published by the Free Software Foundation.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  21. */
  22. #include <linux/mm.h>
  23. #include <linux/kvm_host.h>
  24. #include <linux/uaccess.h>
  25. #include <asm/kvm_arm.h>
  26. #include <asm/kvm_host.h>
  27. #include <asm/kvm_emulate.h>
  28. #include <asm/kvm_coproc.h>
  29. #include <asm/kvm_mmu.h>
  30. #include <asm/cacheflush.h>
  31. #include <asm/cputype.h>
  32. #include <asm/debug-monitors.h>
  33. #include <trace/events/kvm.h>
  34. #include "sys_regs.h"
  35. /*
 * All of this file is extremely similar to the ARM coproc.c, but the
  37. * types are different. My gut feeling is that it should be pretty
  38. * easy to merge, but that would be an ABI breakage -- again. VFP
  39. * would also need to be abstracted.
  40. *
  41. * For AArch32, we only take care of what is being trapped. Anything
  42. * that has to do with init and userspace access has to go via the
  43. * 64bit interface.
  44. */
  45. /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
  46. static u32 cache_levels;
  47. /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
  48. #define CSSELR_MAX 12
/*
 * Which cache CCSIDR represents depends on CSSELR value: select the
 * cache level/type via CSSELR_EL1, then read CCSIDR_EL1 back on the
 * physical CPU.
 */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();	/* CSSELR write must complete before CCSIDR is read */
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
/* DC CISW: clean and invalidate data cache line by set/way. */
static void do_dc_cisw(u32 val)
{
	asm volatile("dc cisw, %x0" : : "r" (val));
	dsb(ish);	/* complete the maintenance op, inner-shareable */
}
/* DC CSW: clean data cache line by set/way. */
static void do_dc_csw(u32 val)
{
	asm volatile("dc csw, %x0" : : "r" (val));
	dsb(ish);	/* complete the maintenance op, inner-shareable */
}
/*
 * Trap handler for data cache maintenance by set/way.
 * See note at ARM ARM B1.14.4.
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	unsigned long val;
	int cpu;

	/* Set/way ops are write-only; a read traps to here as an error. */
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	/*
	 * Ask every other physical CPU to flush its dcache before this
	 * vcpu next runs there: set all bits, then clear our own.
	 */
	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt);

	switch (p->CRm) {
	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:		/* DCCISW */
		do_dc_cisw(val);
		break;

	case 10:		/* DCCSW */
		do_dc_csw(val);
		break;
	}

done:
	put_cpu();
	return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  const struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	unsigned long val;

	/* TVM only traps writes; a read reaching here is a setup bug. */
	BUG_ON(!p->is_write);

	val = *vcpu_reg(vcpu, p->Rt);
	if (!p->is_aarch32) {
		/* AArch64 access: store the full 64bit value. */
		vcpu_sys_reg(vcpu, r->reg) = val;
	} else {
		/*
		 * AArch32 access: a 64bit (mcrr) access updates both
		 * halves of the shadow register, a 32bit (mcr) access
		 * only the low word.
		 */
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
	}

	return true;
}
/*
 * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
 * guest enables the MMU, we stop trapping the VM sys_regs and leave
 * it in complete control of the caches.
 */
static bool access_sctlr(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	/* Record the write in the shadow register first. */
	access_vm_reg(vcpu, p, r);

	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
		/* Drop the VM-register trap and flush stage-2. */
		vcpu->arch.hcr_el2 &= ~HCR_TVM;
		stage2_flush_vm(vcpu->kvm);
	}

	return true;
}
  140. static bool trap_raz_wi(struct kvm_vcpu *vcpu,
  141. const struct sys_reg_params *p,
  142. const struct sys_reg_desc *r)
  143. {
  144. if (p->is_write)
  145. return ignore_write(vcpu, p);
  146. else
  147. return read_zero(vcpu, p);
  148. }
  149. static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
  150. const struct sys_reg_params *p,
  151. const struct sys_reg_desc *r)
  152. {
  153. if (p->is_write) {
  154. return ignore_write(vcpu, p);
  155. } else {
  156. *vcpu_reg(vcpu, p->Rt) = (1 << 3);
  157. return true;
  158. }
  159. }
/*
 * DBGAUTHSTATUS_EL1: writes ignored, reads forward the host's value
 * straight to the guest.
 */
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   const struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u32 val;
		/* Read the physical register and hand it to the guest. */
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
		*vcpu_reg(vcpu, p->Rt) = val;
		return true;
	}
}
  173. /*
  174. * We want to avoid world-switching all the DBG registers all the
  175. * time:
  176. *
  177. * - If we've touched any debug register, it is likely that we're
  178. * going to touch more of them. It then makes sense to disable the
  179. * traps and start doing the save/restore dance
  180. * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
  181. * then mandatory to save/restore the registers, as the guest
  182. * depends on them.
  183. *
  184. * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
  186. *
  187. * On guest entry:
  188. * - If the dirty bit is set (because we're coming back from trapping),
  189. * disable the traps, save host registers, restore guest registers.
  190. * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
  191. * set the dirty bit, disable the traps, save host registers,
  192. * restore guest registers.
  193. * - Otherwise, enable the traps
  194. *
  195. * On guest exit:
  196. * - If the dirty bit is set, save guest registers, restore host
 * registers and clear the dirty bit. This ensures that the host can
  198. * now use the debug registers.
  199. */
  200. static bool trap_debug_regs(struct kvm_vcpu *vcpu,
  201. const struct sys_reg_params *p,
  202. const struct sys_reg_desc *r)
  203. {
  204. if (p->is_write) {
  205. vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
  206. vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
  207. } else {
  208. *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
  209. }
  210. return true;
  211. }
/* Reset the guest's AMAIR_EL1 shadow to the host's current value. */
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/*
	 * Simply map the vcpu_id into the Aff0 field of the MPIDR,
	 * keeping bit 31 set (the fixed RES1 bit of MPIDR_EL1).
	 * Only the low 8 bits of vcpu_id fit in Aff0.
	 */
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}
/*
 * Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go.
 * Each expansion emits four table entries (breakpoint/watchpoint value
 * and control registers for index n), all routed through trap_debug_regs
 * and reset to 0.
 */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug, none of the
 * OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* TEECR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEECR32_EL1, 0 },
	/* TEEHBR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEEHBR32_EL1, 0 },

	/* MDCCSR_EL1 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  trap_raz_wi },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  trap_raz_wi },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },
	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/*
	 * PMU registers: all RAZ/WI, since we advertise zero counters
	 * (see the comment at the top of this table).
	 */
	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  trap_raz_wi },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  trap_raz_wi },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  trap_raz_wi },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  trap_raz_wi },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  trap_raz_wi },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  trap_raz_wi },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  trap_raz_wi },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  trap_raz_wi },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  trap_raz_wi },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  trap_raz_wi },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  trap_raz_wi },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  trap_raz_wi },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};
/*
 * DBGIDR (AArch32): writes ignored; reads synthesize a value from the
 * AArch64 ID registers instead of exposing the raw host register.
 */
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
		u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
		/* EL3 implemented? (ID_AA64PFR0_EL1 bits [15:12]) */
		u32 el3 = !!((pfr >> 12) & 0xf);

		/*
		 * Pack the ID_AA64DFR0 watchpoint/breakpoint/context
		 * comparator counts into the DBGIDR layout, advertise
		 * debug architecture version 6 and mirror EL3 presence
		 * into bits 14 and 12.
		 * NOTE(review): field offsets assumed per ARM ARM DBGIDR
		 * layout (WRPs[31:28], BRPs[27:24], CTX_CMPs[23:20],
		 * Version[19:16]) -- confirm against the spec.
		 */
		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
					  (((dfr >> 12) & 0xf) << 24) |
					  (((dfr >> 28) & 0xf) << 20) |
					  (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
  464. static bool trap_debug32(struct kvm_vcpu *vcpu,
  465. const struct sys_reg_params *p,
  466. const struct sys_reg_desc *r)
  467. {
  468. if (p->is_write) {
  469. vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
  470. vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
  471. } else {
  472. *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
  473. }
  474. return true;
  475. }
/*
 * AArch32 counterpart of DBG_BCR_BVR_WCR_WVR_EL1: expands the four
 * cp14 debug registers for index n. The "* 2" stride matches the
 * layout of the cp14 shadow array (one 64bit slot per register).
 */
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32,		\
	  NULL, (cp14_DBGBVR0 + (n) * 2) },				\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32,		\
	  NULL, (cp14_DBGBCR0 + (n) * 2) },				\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32,		\
	  NULL, (cp14_DBGWVR0 + (n) * 2) },				\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32,		\
	  NULL, (cp14_DBGWCR0 + (n) * 2) }
  489. #define DBGBXVR(n) \
  490. { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \
  491. NULL, cp14_DBGBXVR0 + n * 2 }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers (mcrr/mrrc accesses), both RAZ/WI. */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU (all RAZ/WI, consistent with the AArch64 table above) */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
};
/* Trapped cp15 64bit (mcrr/mrrc) registers: the TTBR double encoding. */
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

/*
 * Register the target-specific sys_reg table for @target.
 * NOTE(review): @target is used as an index without a bounds check --
 * callers are trusted to pass a value < KVM_ARM_NUM_TARGETS.
 */
void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}
  636. /* Get specific register table for this target. */
  637. static const struct sys_reg_desc *get_target_table(unsigned target,
  638. bool mode_is_64,
  639. size_t *num)
  640. {
  641. struct kvm_sys_reg_target_table *table;
  642. table = target_tables[target];
  643. if (mode_is_64) {
  644. *num = table->table64.num;
  645. return table->table64.table;
  646. } else {
  647. *num = table->table32.num;
  648. return table->table32.table;
  649. }
  650. }
  651. static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
  652. const struct sys_reg_desc table[],
  653. unsigned int num)
  654. {
  655. unsigned int i;
  656. for (i = 0; i < num; i++) {
  657. const struct sys_reg_desc *r = &table[i];
  658. if (params->Op0 != r->Op0)
  659. continue;
  660. if (params->Op1 != r->Op1)
  661. continue;
  662. if (params->CRn != r->CRn)
  663. continue;
  664. if (params->CRm != r->CRm)
  665. continue;
  666. if (params->Op2 != r->Op2)
  667. continue;
  668. return r;
  669. }
  670. return NULL;
  671. }
/*
 * LDC/STC accesses to cp14 are not emulated: inject an undefined
 * instruction exception into the guest. The return value of 1 follows
 * the KVM exit-handler convention used elsewhere in this file
 * (presumably "handled, resume the guest" -- confirm against the
 * arm64 handle_exit code).
 */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 * call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      const struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		/*
		 * The accessor returning false means the handler chose
		 * to inject an exception instead; only advance the PC
		 * when the access was actually emulated.
		 */
		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		}

		/* Handled */
		return 0;
	}

	/* Not handled */
	return -1;
}
  714. static void unhandled_cp_access(struct kvm_vcpu *vcpu,
  715. struct sys_reg_params *params)
  716. {
  717. u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
  718. int cp;
  719. switch(hsr_ec) {
  720. case ESR_EL2_EC_CP15_32:
  721. case ESR_EL2_EC_CP15_64:
  722. cp = 15;
  723. break;
  724. case ESR_EL2_EC_CP14_MR:
  725. case ESR_EL2_EC_CP14_64:
  726. cp = 14;
  727. break;
  728. default:
  729. WARN_ON((cp = -1));
  730. }
  731. kvm_err("Unsupported guest CP%d access at: %08lx\n",
  732. cp, *vcpu_pc(vcpu));
  733. print_sys_reg_instr(params);
  734. kvm_inject_undefined(vcpu);
  735. }
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15/CP14 access
 * @vcpu: The VCPU pointer
 * @global: generic 64-bit coprocessor trap table
 * @nr_global: number of entries in @global
 * @target_specific: per-target trap table, searched first (may be NULL)
 * @nr_specific: number of entries in @target_specific
 *
 * Always returns 1 (resume the guest): an unmatched access injects an
 * undefined instruction exception rather than failing the exit.
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt2 = (hsr >> 10) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);	/* Direction bit: 0 == write */
	/* 64-bit accesses only encode Op1/CRm; the rest stays zero. */
	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;
	/*
	 * Massive hack here. Store Rt2 in the top 32bits so we only
	 * have one register to deal with. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val &= 0xffffffff;
		val |= *vcpu_reg(vcpu, Rt2) << 32;
		*vcpu_reg(vcpu, params.Rt) = val;
	}
	/* Per-target entries take precedence over the global table. */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;
	unhandled_cp_access(vcpu, &params);
out:
	/* Do the opposite hack for the read side */
	if (!params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val >>= 32;
		*vcpu_reg(vcpu, Rt2) = val;
	}
	return 1;
}
  784. /**
  785. * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
  786. * @vcpu: The VCPU pointer
  787. * @run: The kvm_run struct
  788. */
  789. static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
  790. const struct sys_reg_desc *global,
  791. size_t nr_global,
  792. const struct sys_reg_desc *target_specific,
  793. size_t nr_specific)
  794. {
  795. struct sys_reg_params params;
  796. u32 hsr = kvm_vcpu_get_hsr(vcpu);
  797. params.is_aarch32 = true;
  798. params.is_32bit = true;
  799. params.CRm = (hsr >> 1) & 0xf;
  800. params.Rt = (hsr >> 5) & 0xf;
  801. params.is_write = ((hsr & 1) == 0);
  802. params.CRn = (hsr >> 10) & 0xf;
  803. params.Op0 = 0;
  804. params.Op1 = (hsr >> 14) & 0x7;
  805. params.Op2 = (hsr >> 17) & 0x7;
  806. if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
  807. return 1;
  808. if (!emulate_cp(vcpu, &params, global, nr_global))
  809. return 1;
  810. unhandled_cp_access(vcpu, &params);
  811. return 1;
  812. }
  813. int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
  814. {
  815. const struct sys_reg_desc *target_specific;
  816. size_t num;
  817. target_specific = get_target_table(vcpu->arch.target, false, &num);
  818. return kvm_handle_cp_64(vcpu,
  819. cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
  820. target_specific, num);
  821. }
  822. int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
  823. {
  824. const struct sys_reg_desc *target_specific;
  825. size_t num;
  826. target_specific = get_target_table(vcpu->arch.target, false, &num);
  827. return kvm_handle_cp_32(vcpu,
  828. cp15_regs, ARRAY_SIZE(cp15_regs),
  829. target_specific, num);
  830. }
/*
 * Exit handler for CP14 mcrr/mrrc traps: only the global cp14_64_regs
 * table applies — no target-specific CP14 entries exist.
 */
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}
/*
 * Exit handler for CP14 mcr/mrc traps: only the global cp14_regs
 * table applies — no target-specific CP14 entries exist.
 */
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}
  843. static int emulate_sys_reg(struct kvm_vcpu *vcpu,
  844. const struct sys_reg_params *params)
  845. {
  846. size_t num;
  847. const struct sys_reg_desc *table, *r;
  848. table = get_target_table(vcpu->arch.target, true, &num);
  849. /* Search target-specific then generic table. */
  850. r = find_reg(params, table, num);
  851. if (!r)
  852. r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
  853. if (likely(r)) {
  854. /*
  855. * Not having an accessor means that we have
  856. * configured a trap that we don't know how to
  857. * handle. This certainly qualifies as a gross bug
  858. * that should be fixed right away.
  859. */
  860. BUG_ON(!r->access);
  861. if (likely(r->access(vcpu, params, r))) {
  862. /* Skip instruction, since it was emulated */
  863. kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
  864. return 1;
  865. }
  866. /* If access function fails, it should complain. */
  867. } else {
  868. kvm_err("Unsupported guest sys_reg access at: %lx\n",
  869. *vcpu_pc(vcpu));
  870. print_sys_reg_instr(params);
  871. }
  872. kvm_inject_undefined(vcpu);
  873. return 1;
  874. }
  875. static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
  876. const struct sys_reg_desc *table, size_t num)
  877. {
  878. unsigned long i;
  879. for (i = 0; i < num; i++)
  880. if (table[i].reset)
  881. table[i].reset(vcpu, &table[i]);
  882. }
  883. /**
  884. * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
  885. * @vcpu: The VCPU pointer
  886. * @run: The kvm_run struct
  887. */
  888. int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
  889. {
  890. struct sys_reg_params params;
  891. unsigned long esr = kvm_vcpu_get_hsr(vcpu);
  892. params.is_aarch32 = false;
  893. params.is_32bit = false;
  894. params.Op0 = (esr >> 20) & 3;
  895. params.Op1 = (esr >> 14) & 0x7;
  896. params.CRn = (esr >> 10) & 0xf;
  897. params.CRm = (esr >> 1) & 0xf;
  898. params.Op2 = (esr >> 17) & 0x7;
  899. params.Rt = (esr >> 5) & 0x1f;
  900. params.is_write = !(esr & 1);
  901. return emulate_sys_reg(vcpu, &params);
  902. }
  903. /******************************************************************************
  904. * Userspace API
  905. *****************************************************************************/
  906. static bool index_to_params(u64 id, struct sys_reg_params *params)
  907. {
  908. switch (id & KVM_REG_SIZE_MASK) {
  909. case KVM_REG_SIZE_U64:
  910. /* Any unused index bits means it's not valid. */
  911. if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
  912. | KVM_REG_ARM_COPROC_MASK
  913. | KVM_REG_ARM64_SYSREG_OP0_MASK
  914. | KVM_REG_ARM64_SYSREG_OP1_MASK
  915. | KVM_REG_ARM64_SYSREG_CRN_MASK
  916. | KVM_REG_ARM64_SYSREG_CRM_MASK
  917. | KVM_REG_ARM64_SYSREG_OP2_MASK))
  918. return false;
  919. params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
  920. >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
  921. params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
  922. >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
  923. params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
  924. >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
  925. params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
  926. >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
  927. params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
  928. >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
  929. return true;
  930. default:
  931. return false;
  932. }
  933. }
  934. /* Decode an index value, and find the sys_reg_desc entry. */
  935. static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
  936. u64 id)
  937. {
  938. size_t num;
  939. const struct sys_reg_desc *table, *r;
  940. struct sys_reg_params params;
  941. /* We only do sys_reg for now. */
  942. if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
  943. return NULL;
  944. if (!index_to_params(id, &params))
  945. return NULL;
  946. table = get_target_table(vcpu->arch.target, true, &num);
  947. r = find_reg(&params, table, num);
  948. if (!r)
  949. r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
  950. /* Not saved in the sys_reg array? */
  951. if (r && !r->reg)
  952. r = NULL;
  953. return r;
  954. }
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

/*
 * Generate a get_<reg>() reset helper which reads the host's value of
 * <reg> with MRS and stashes it in the descriptor's ->val field.  The
 * const is deliberately cast away: kvm_sys_reg_table_init() abuses
 * these hooks to fill in the invariant_sys_regs table itself.
 */
#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/*
 * ->val is filled in by kvm_sys_reg_table_init().  Each entry's
 * Op0/Op1/CRn/CRm/Op2 is the MRS encoding of the register named by
 * its get_* reset hook.  Must stay sorted (see check_sysreg_table).
 */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },		/* MIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },	/* REVIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },	/* ID_PFR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },	/* ID_PFR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },	/* ID_DFR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },	/* ID_AFR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },	/* ID_MMFR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },	/* ID_MMFR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },	/* ID_MMFR2_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },	/* ID_MMFR3_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },	/* ID_ISAR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },	/* ID_ISAR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },	/* ID_ISAR2_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },	/* ID_ISAR3_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },	/* ID_ISAR4_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },	/* ID_ISAR5_EL1 */
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },	/* CLIDR_EL1 */
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },		/* AIDR_EL1 */
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },		/* CTR_EL0 */
};
  1032. static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
  1033. {
  1034. if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
  1035. return -EFAULT;
  1036. return 0;
  1037. }
  1038. static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
  1039. {
  1040. if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
  1041. return -EFAULT;
  1042. return 0;
  1043. }
  1044. static int get_invariant_sys_reg(u64 id, void __user *uaddr)
  1045. {
  1046. struct sys_reg_params params;
  1047. const struct sys_reg_desc *r;
  1048. if (!index_to_params(id, &params))
  1049. return -ENOENT;
  1050. r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
  1051. if (!r)
  1052. return -ENOENT;
  1053. return reg_to_user(uaddr, &r->val, id);
  1054. }
  1055. static int set_invariant_sys_reg(u64 id, void __user *uaddr)
  1056. {
  1057. struct sys_reg_params params;
  1058. const struct sys_reg_desc *r;
  1059. int err;
  1060. u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
  1061. if (!index_to_params(id, &params))
  1062. return -ENOENT;
  1063. r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
  1064. if (!r)
  1065. return -ENOENT;
  1066. err = reg_from_user(&val, uaddr, id);
  1067. if (err)
  1068. return err;
  1069. /* This is what we mean by invariant: you can't change it. */
  1070. if (r->val != val)
  1071. return -EINVAL;
  1072. return 0;
  1073. }
  1074. static bool is_valid_cache(u32 val)
  1075. {
  1076. u32 level, ctype;
  1077. if (val >= CSSELR_MAX)
  1078. return false;
  1079. /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
  1080. level = (val >> 1);
  1081. ctype = (cache_levels >> (level * 3)) & 7;
  1082. switch (ctype) {
  1083. case 0: /* No cache */
  1084. return false;
  1085. case 1: /* Instruction cache only */
  1086. return (val & 1);
  1087. case 2: /* Data cache only */
  1088. case 4: /* Unified cache */
  1089. return !(val & 1);
  1090. case 3: /* Separate instruction and data caches */
  1091. return true;
  1092. default: /* Reserved: we can't know instruction or data. */
  1093. return false;
  1094. }
  1095. }
  1096. static int demux_c15_get(u64 id, void __user *uaddr)
  1097. {
  1098. u32 val;
  1099. u32 __user *uval = uaddr;
  1100. /* Fail if we have unknown bits set. */
  1101. if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
  1102. | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
  1103. return -ENOENT;
  1104. switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
  1105. case KVM_REG_ARM_DEMUX_ID_CCSIDR:
  1106. if (KVM_REG_SIZE(id) != 4)
  1107. return -ENOENT;
  1108. val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
  1109. >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
  1110. if (!is_valid_cache(val))
  1111. return -ENOENT;
  1112. return put_user(get_ccsidr(val), uval);
  1113. default:
  1114. return -ENOENT;
  1115. }
  1116. }
  1117. static int demux_c15_set(u64 id, void __user *uaddr)
  1118. {
  1119. u32 val, newval;
  1120. u32 __user *uval = uaddr;
  1121. /* Fail if we have unknown bits set. */
  1122. if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
  1123. | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
  1124. return -ENOENT;
  1125. switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
  1126. case KVM_REG_ARM_DEMUX_ID_CCSIDR:
  1127. if (KVM_REG_SIZE(id) != 4)
  1128. return -ENOENT;
  1129. val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
  1130. >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
  1131. if (!is_valid_cache(val))
  1132. return -ENOENT;
  1133. if (get_user(newval, uval))
  1134. return -EFAULT;
  1135. /* This is also invariant: you can't change it. */
  1136. if (newval != get_ccsidr(val))
  1137. return -EINVAL;
  1138. return 0;
  1139. default:
  1140. return -ENOENT;
  1141. }
  1142. }
  1143. int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
  1144. {
  1145. const struct sys_reg_desc *r;
  1146. void __user *uaddr = (void __user *)(unsigned long)reg->addr;
  1147. if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
  1148. return demux_c15_get(reg->id, uaddr);
  1149. if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
  1150. return -ENOENT;
  1151. r = index_to_sys_reg_desc(vcpu, reg->id);
  1152. if (!r)
  1153. return get_invariant_sys_reg(reg->id, uaddr);
  1154. return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
  1155. }
  1156. int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
  1157. {
  1158. const struct sys_reg_desc *r;
  1159. void __user *uaddr = (void __user *)(unsigned long)reg->addr;
  1160. if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
  1161. return demux_c15_set(reg->id, uaddr);
  1162. if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
  1163. return -ENOENT;
  1164. r = index_to_sys_reg_desc(vcpu, reg->id);
  1165. if (!r)
  1166. return set_invariant_sys_reg(reg->id, uaddr);
  1167. return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
  1168. }
  1169. static unsigned int num_demux_regs(void)
  1170. {
  1171. unsigned int i, count = 0;
  1172. for (i = 0; i < CSSELR_MAX; i++)
  1173. if (is_valid_cache(i))
  1174. count++;
  1175. return count;
  1176. }
  1177. static int write_demux_regids(u64 __user *uindices)
  1178. {
  1179. u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
  1180. unsigned int i;
  1181. val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
  1182. for (i = 0; i < CSSELR_MAX; i++) {
  1183. if (!is_valid_cache(i))
  1184. continue;
  1185. if (put_user(val | i, uindices))
  1186. return -EFAULT;
  1187. uindices++;
  1188. }
  1189. return 0;
  1190. }
  1191. static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
  1192. {
  1193. return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
  1194. KVM_REG_ARM64_SYSREG |
  1195. (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
  1196. (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
  1197. (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
  1198. (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
  1199. (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
  1200. }
  1201. static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
  1202. {
  1203. if (!*uind)
  1204. return true;
  1205. if (put_user(sys_reg_to_index(reg), *uind))
  1206. return false;
  1207. (*uind)++;
  1208. return true;
  1209. }
/*
 * Assumed ordered tables, see kvm_sys_reg_table_init.
 *
 * Merge-walks the target-specific and generic tables in encoding
 * order, emitting each saved register index once (target-specific
 * entries shadow generic ones with the same encoding).  With a NULL
 * @uind this only counts.  Returns the number of indices emitted, or
 * -EFAULT on a userspace fault.
 */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	/* Both tables must be non-empty for the merge below. */
	BUG_ON(i1 == end1 || i2 == end2);

	/*
	 * Walk carefully, as both tables may refer to the same register.
	 * An exhausted iterator becomes NULL; NOTE(review): cmp_sys_reg()
	 * is assumed to order a NULL operand after any real entry —
	 * confirm in its definition.
	 */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		/* On a tie (cmp == 0) both iterators advance: one entry out. */
		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}

	return total;
}
  1248. unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
  1249. {
  1250. return ARRAY_SIZE(invariant_sys_regs)
  1251. + num_demux_regs()
  1252. + walk_sys_regs(vcpu, (u64 __user *)NULL);
  1253. }
  1254. int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
  1255. {
  1256. unsigned int i;
  1257. int err;
  1258. /* Then give them all the invariant registers' indices. */
  1259. for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
  1260. if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
  1261. return -EFAULT;
  1262. uindices++;
  1263. }
  1264. err = walk_sys_regs(vcpu, uindices);
  1265. if (err < 0)
  1266. return err;
  1267. uindices += err;
  1268. return write_demux_regids(uindices);
  1269. }
  1270. static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
  1271. {
  1272. unsigned int i;
  1273. for (i = 1; i < n; i++) {
  1274. if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
  1275. kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
  1276. return 1;
  1277. }
  1278. }
  1279. return 0;
  1280. }
/*
 * One-time init: sanity-check the trap table ordering, snapshot the
 * host's invariant registers, and derive cache_levels from CLIDR_EL1.
 */
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	/* Find the first 3-bit Ctype field that reads 0b000... */
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* ...and clear all Ctype fields at and beyond it. */
	cache_levels &= (1 << (i*3))-1;
}
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	/*
	 * Any register still holding the 0x42 poison pattern was never
	 * reset by either table.  NOTE(review): the scan starts at
	 * index 1 — presumably 0 is not a valid sysreg slot; confirm
	 * against the NR_SYS_REGS index definitions.
	 */
	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}
  1333. }