sys_regs.c

/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>

#include "sys_regs.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        /* Put value into CSSELR */
        asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
        isb();
        /* Read result out of CCSIDR */
        asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
        local_irq_enable();

        return ccsidr;
}
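
/*
 * Helpers for the trapped set/way cache maintenance operations: clean
 * (and, for CISW, invalidate) the data cache by set/way on behalf of
 * the guest, completing the operation with a DSB.
 */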
static void do_dc_cisw(u32 val)
{
        asm volatile("dc cisw, %x0" : : "r" (val));
        dsb();
}

static void do_dc_csw(u32 val)
{
        asm volatile("dc csw, %x0" : : "r" (val));
        dsb();
}

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        unsigned long val;
        int cpu;

        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        cpu = get_cpu();

        cpumask_setall(&vcpu->arch.require_dcache_flush);
        cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

        /* If we were already preempted, take the long way around */
        if (cpu != vcpu->arch.last_pcpu) {
                flush_cache_all();
                goto done;
        }

        val = *vcpu_reg(vcpu, p->Rt);

        switch (p->CRm) {
        case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
        case 14:                /* DCCISW */
                do_dc_cisw(val);
                break;

        case 10:                /* DCCSW */
                do_dc_csw(val);
                break;
        }

done:
        put_cpu();
        return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
                          const struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        unsigned long val;

        BUG_ON(!p->is_write);

        val = *vcpu_reg(vcpu, p->Rt);
        if (!p->is_aarch32) {
                vcpu_sys_reg(vcpu, r->reg) = val;
        } else {
                vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL;
                if (!p->is_32bit)
                        vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
        }
        return true;
}
/*
 * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set.  If the
 * guest enables the MMU, we stop trapping the VM sys_regs and leave
 * it in complete control of the caches.
 */
static bool access_sctlr(struct kvm_vcpu *vcpu,
                         const struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        access_vm_reg(vcpu, p, r);

        if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
                vcpu->arch.hcr_el2 &= ~HCR_TVM;
                stage2_flush_vm(vcpu->kvm);
        }

        return true;
}
/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
                    const struct sys_reg_params *p,
                    const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 amair;

        asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
        vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        /*
         * Simply map the vcpu_id into the Aff0 field of the MPIDR.
         */
        vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 */
static const struct sys_reg_desc sys_reg_descs[] = {
        /* DC ISW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
          access_dcsw },
        /* DC CSW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
          access_dcsw },
        /* DC CISW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
          access_dcsw },

        /* TEECR32_EL1 */
        { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, reset_val, TEECR32_EL1, 0 },
        /* TEEHBR32_EL1 */
        { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
          NULL, reset_val, TEEHBR32_EL1, 0 },
        /* DBGVCR32_EL2 */
        { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
          NULL, reset_val, DBGVCR32_EL2, 0 },

        /* MPIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
          NULL, reset_mpidr, MPIDR_EL1 },
        /* SCTLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
          access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
        /* CPACR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
          NULL, reset_val, CPACR_EL1, 0 },
        /* TTBR0_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
          access_vm_reg, reset_unknown, TTBR0_EL1 },
        /* TTBR1_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_unknown, TTBR1_EL1 },
        /* TCR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
          access_vm_reg, reset_val, TCR_EL1, 0 },

        /* AFSR0_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
          access_vm_reg, reset_unknown, AFSR0_EL1 },
        /* AFSR1_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
          access_vm_reg, reset_unknown, AFSR1_EL1 },
        /* ESR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
          access_vm_reg, reset_unknown, ESR_EL1 },
        /* FAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
          access_vm_reg, reset_unknown, FAR_EL1 },
        /* PAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
          NULL, reset_unknown, PAR_EL1 },

        /* PMINTENSET_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
          pm_fake },
        /* PMINTENCLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
          pm_fake },

        /* MAIR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
          access_vm_reg, reset_unknown, MAIR_EL1 },
        /* AMAIR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
          access_vm_reg, reset_amair_el1, AMAIR_EL1 },

        /* VBAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
          NULL, reset_val, VBAR_EL1, 0 },
        /* CONTEXTIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
        /* TPIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
          NULL, reset_unknown, TPIDR_EL1 },

        /* CNTKCTL_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
          NULL, reset_val, CNTKCTL_EL1, 0},

        /* CSSELR_EL1 */
        { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, reset_unknown, CSSELR_EL1 },

        /* PMCR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
          pm_fake },
        /* PMCNTENSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
          pm_fake },
        /* PMCNTENCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
          pm_fake },
        /* PMOVSCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
          pm_fake },
        /* PMSWINC_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
          pm_fake },
        /* PMSELR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
          pm_fake },
        /* PMCEID0_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
          pm_fake },
        /* PMCEID1_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
          pm_fake },
        /* PMCCNTR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
          pm_fake },
        /* PMXEVTYPER_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
          pm_fake },
        /* PMXEVCNTR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
          pm_fake },
        /* PMUSERENR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
          pm_fake },
        /* PMOVSSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
          pm_fake },

        /* TPIDR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
          NULL, reset_unknown, TPIDR_EL0 },
        /* TPIDRRO_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
          NULL, reset_unknown, TPIDRRO_EL0 },

        /* DACR32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
          NULL, reset_unknown, DACR32_EL2 },
        /* IFSR32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
          NULL, reset_unknown, IFSR32_EL2 },
        /* FPEXC32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
          NULL, reset_val, FPEXC32_EL2, 0x70 },
};
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
        { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
        { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
        { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
        { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
        { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
        { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
        { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
        /*
         * DC{C,I,CI}SW operations:
         */
        { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
        { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
        { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
        { Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
        { Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
        { Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
        { Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
        { Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
        { Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
        { Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
        { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
        { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
        { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
        { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
        { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
                                       struct kvm_sys_reg_target_table *table)
{
        target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
                                                   bool mode_is_64,
                                                   size_t *num)
{
        struct kvm_sys_reg_target_table *table;

        table = target_tables[target];
        if (mode_is_64) {
                *num = table->table64.num;
                return table->table64.table;
        } else {
                *num = table->table32.num;
                return table->table32.table;
        }
}
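
/*
 * Look up a register by its full Op0/Op1/CRn/CRm/Op2 encoding. This is a
 * simple linear scan of the table; it returns NULL if nothing matches.
 */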
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
                                           const struct sys_reg_desc table[],
                                           unsigned int num)
{
        unsigned int i;

        for (i = 0; i < num; i++) {
                const struct sys_reg_desc *r = &table[i];

                if (params->Op0 != r->Op0)
                        continue;
                if (params->Op1 != r->Op1)
                        continue;
                if (params->CRn != r->CRn)
                        continue;
                if (params->CRm != r->CRm)
                        continue;
                if (params->Op2 != r->Op2)
                        continue;

                return r;
        }
        return NULL;
}
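
/*
 * CP14 (debug coprocessor) accesses are not emulated at all: any trapped
 * access simply injects an undefined instruction exception into the guest.
 */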
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}
static void emulate_cp15(struct kvm_vcpu *vcpu,
                         const struct sys_reg_params *params)
{
        size_t num;
        const struct sys_reg_desc *table, *r;

        table = get_target_table(vcpu->arch.target, false, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

        if (likely(r)) {
                /*
                 * Not having an accessor means that we have
                 * configured a trap that we don't know how to
                 * handle. This certainly qualifies as a gross bug
                 * that should be fixed right away.
                 */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                        return;
                }
                /* If access function fails, it should complain. */
        }

        kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct sys_reg_params params;
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        int Rt2 = (hsr >> 10) & 0xf;

        params.is_aarch32 = true;
        params.is_32bit = false;
        params.CRm = (hsr >> 1) & 0xf;
        params.Rt = (hsr >> 5) & 0xf;
        params.is_write = ((hsr & 1) == 0);

        params.Op0 = 0;
        params.Op1 = (hsr >> 16) & 0xf;
        params.Op2 = 0;
        params.CRn = 0;

        /*
         * Massive hack here. Store Rt2 in the top 32bits so we only
         * have one register to deal with. As we use the same trap
         * backends between AArch32 and AArch64, we get away with it.
         */
        if (params.is_write) {
                u64 val = *vcpu_reg(vcpu, params.Rt);
                val &= 0xffffffff;
                val |= *vcpu_reg(vcpu, Rt2) << 32;
                *vcpu_reg(vcpu, params.Rt) = val;
        }

        emulate_cp15(vcpu, &params);

        /* Do the opposite hack for the read side */
        if (!params.is_write) {
                u64 val = *vcpu_reg(vcpu, params.Rt);
                val >>= 32;
                *vcpu_reg(vcpu, Rt2) = val;
        }

        return 1;
}
/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct sys_reg_params params;
        u32 hsr = kvm_vcpu_get_hsr(vcpu);

        params.is_aarch32 = true;
        params.is_32bit = true;
        params.CRm = (hsr >> 1) & 0xf;
        params.Rt = (hsr >> 5) & 0xf;
        params.is_write = ((hsr & 1) == 0);
        params.CRn = (hsr >> 10) & 0xf;
        params.Op0 = 0;
        params.Op1 = (hsr >> 14) & 0x7;
        params.Op2 = (hsr >> 17) & 0x7;

        emulate_cp15(vcpu, &params);
        return 1;
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
                           const struct sys_reg_params *params)
{
        size_t num;
        const struct sys_reg_desc *table, *r;

        table = get_target_table(vcpu->arch.target, true, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        if (likely(r)) {
                /*
                 * Not having an accessor means that we have
                 * configured a trap that we don't know how to
                 * handle. This certainly qualifies as a gross bug
                 * that should be fixed right away.
                 */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                        return 1;
                }
                /* If access function fails, it should complain. */
        } else {
                kvm_err("Unsupported guest sys_reg access at: %lx\n",
                        *vcpu_pc(vcpu));
                print_sys_reg_instr(params);
        }
        kvm_inject_undefined(vcpu);
        return 1;
}
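
/*
 * Walk a descriptor table and invoke each entry's reset handler (if any) to
 * bring the corresponding vcpu register to its defined reset value.
 */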
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
                                const struct sys_reg_desc *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct sys_reg_params params;
        unsigned long esr = kvm_vcpu_get_hsr(vcpu);

        params.is_aarch32 = false;
        params.is_32bit = false;
        params.Op0 = (esr >> 20) & 3;
        params.Op1 = (esr >> 14) & 0x7;
        params.CRn = (esr >> 10) & 0xf;
        params.CRm = (esr >> 1) & 0xf;
        params.Op2 = (esr >> 17) & 0x7;
        params.Rt = (esr >> 5) & 0x1f;
        params.is_write = !(esr & 1);

        return emulate_sys_reg(vcpu, &params);
}
/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U64:
                /* Any unused index bits means it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                           | KVM_REG_ARM_COPROC_MASK
                           | KVM_REG_ARM64_SYSREG_OP0_MASK
                           | KVM_REG_ARM64_SYSREG_OP1_MASK
                           | KVM_REG_ARM64_SYSREG_CRN_MASK
                           | KVM_REG_ARM64_SYSREG_CRM_MASK
                           | KVM_REG_ARM64_SYSREG_OP2_MASK))
                        return false;
                params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
                params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
                return true;
        default:
                return false;
        }
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
                                                        u64 id)
{
        size_t num;
        const struct sys_reg_desc *table, *r;
        struct sys_reg_params params;

        /* We only do sys_reg for now. */
        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
                return NULL;

        if (!index_to_params(id, &params))
                return NULL;

        table = get_target_table(vcpu->arch.target, true, &num);
        r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        /* Not saved in the sys_reg array? */
        if (r && !r->reg)
                r = NULL;

        return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)                                         \
        static void get_##reg(struct kvm_vcpu *v,                      \
                              const struct sys_reg_desc *r)            \
        {                                                               \
                u64 val;                                                \
                                                                        \
                asm volatile("mrs %0, " __stringify(reg) "\n"           \
                             : "=r" (val));                             \
                ((struct sys_reg_desc *)r)->val = val;                  \
        }

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, get_midr_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
          NULL, get_revidr_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
          NULL, get_id_pfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
          NULL, get_id_pfr1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
          NULL, get_id_dfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
          NULL, get_id_afr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
          NULL, get_id_mmfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
          NULL, get_id_mmfr1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
          NULL, get_id_mmfr2_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
          NULL, get_id_mmfr3_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
          NULL, get_id_isar0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
          NULL, get_id_isar1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
          NULL, get_id_isar2_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
          NULL, get_id_isar3_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
          NULL, get_id_isar4_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
          NULL, get_id_isar5_el1 },
        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
          NULL, get_clidr_el1 },
        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
          NULL, get_aidr_el1 },
        { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
          NULL, get_ctr_el0 },
};
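
/*
 * Transfer a register value between the vcpu state and a userspace buffer,
 * using the size encoded in the register id to decide how much to copy.
 */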
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
        /* This Just Works because we are little endian. */
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
        /* This Just Works because we are little endian. */
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;

        if (!index_to_params(id, &params))
                return -ENOENT;

        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;
        int err;
        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

        if (!index_to_params(id, &params))
                return -ENOENT;
        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        err = reg_from_user(&val, uaddr, id);
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}
static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        /* This is a bool function: an out-of-range CSSELR is simply invalid. */
        if (val >= CSSELR_MAX)
                return false;
        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}
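
/*
 * DEMUX registers expose one CCSIDR value per valid cache level to
 * userspace; the low bits of the register id carry the CSSELR value
 * used to select which cache is being described.
 */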
static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return get_invariant_sys_reg(reg->id, uaddr);

        return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return set_invariant_sys_reg(reg->id, uaddr);

        return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
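
/*
 * Count how many cache levels are exposed as DEMUX CCSIDR registers, i.e.
 * how many CSSELR values pass the is_valid_cache() check.
 */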
static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}

static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}
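
/*
 * Build the KVM_GET_ONE_REG/KVM_SET_ONE_REG index for a descriptor by
 * packing its Op0/Op1/CRn/CRm/Op2 encoding into the 64-bit register id.
 */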
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
        return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
                KVM_REG_ARM64_SYSREG |
                (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
                (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
                (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
                (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
                (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
        if (!*uind)
                return true;

        if (put_user(sys_reg_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct sys_reg_desc *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, true, &num);
        end1 = i1 + num;
        i2 = sys_reg_descs;
        end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_sys_reg(i1, i2);
                /* target-specific overrides generic entry. */
                if (cmp <= 0) {
                        /* Ignore registers we trap but don't save. */
                        if (i1->reg) {
                                if (!copy_reg_to_user(i1, &uind))
                                        return -EFAULT;
                                total++;
                        }
                } else {
                        /* Ignore registers we trap but don't save. */
                        if (i2->reg) {
                                if (!copy_reg_to_user(i2, &uind))
                                        return -EFAULT;
                                total++;
                        }
                }

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}
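
/*
 * The register list visible to userspace consists of the invariant
 * registers, the demuxed CCSIDR entries and everything walk_sys_regs()
 * reports; passing a NULL pointer to walk_sys_regs() only counts them.
 */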
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(invariant_sys_regs)
                + num_demux_regs()
                + walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* Then give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
                if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_sys_regs(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}
void kvm_sys_reg_table_init(void)
{
        unsigned int i;
        struct sys_reg_desc clidr;

        /* Make sure tables are unique and in order. */
        for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
                BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
                invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

        /*
         * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        get_clidr_el1(NULL, &clidr); /* Ugly... */
        cache_levels = clidr.val;
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
}
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct sys_reg_desc *table;

        /* Catch someone adding a register without putting in reset entry. */
        memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

        /* Generic chip reset first (so target could override). */
        reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        table = get_target_table(vcpu->arch.target, true, &num);
        reset_sys_reg_descs(vcpu, table, num);

        for (num = 1; num < NR_SYS_REGS; num++)
                if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
                        panic("Didn't reset vcpu_sys_reg(%zi)", num);
}