sys_regs.c

/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_read;

	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
	case SCTLR_EL1:		return read_sysreg_s(sctlr_EL12);
	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
	case CPACR_EL1:		return read_sysreg_s(cpacr_EL12);
	case TTBR0_EL1:		return read_sysreg_s(ttbr0_EL12);
	case TTBR1_EL1:		return read_sysreg_s(ttbr1_EL12);
	case TCR_EL1:		return read_sysreg_s(tcr_EL12);
	case ESR_EL1:		return read_sysreg_s(esr_EL12);
	case AFSR0_EL1:		return read_sysreg_s(afsr0_EL12);
	case AFSR1_EL1:		return read_sysreg_s(afsr1_EL12);
	case FAR_EL1:		return read_sysreg_s(far_EL12);
	case MAIR_EL1:		return read_sysreg_s(mair_EL12);
	case VBAR_EL1:		return read_sysreg_s(vbar_EL12);
	case CONTEXTIDR_EL1:	return read_sysreg_s(contextidr_EL12);
	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
	case AMAIR_EL1:		return read_sysreg_s(amair_EL12);
	case CNTKCTL_EL1:	return read_sysreg_s(cntkctl_EL12);
	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
	}

immediate_read:
	return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_write;

	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be
	 * set once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
	case SCTLR_EL1:		write_sysreg_s(val, sctlr_EL12);	return;
	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
	case CPACR_EL1:		write_sysreg_s(val, cpacr_EL12);	return;
	case TTBR0_EL1:		write_sysreg_s(val, ttbr0_EL12);	return;
	case TTBR1_EL1:		write_sysreg_s(val, ttbr1_EL12);	return;
	case TCR_EL1:		write_sysreg_s(val, tcr_EL12);		return;
	case ESR_EL1:		write_sysreg_s(val, esr_EL12);		return;
	case AFSR0_EL1:		write_sysreg_s(val, afsr0_EL12);	return;
	case AFSR1_EL1:		write_sysreg_s(val, afsr1_EL12);	return;
	case FAR_EL1:		write_sysreg_s(val, far_EL12);		return;
	case MAIR_EL1:		write_sysreg_s(val, mair_EL12);		return;
	case VBAR_EL1:		write_sysreg_s(val, vbar_EL12);		return;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, contextidr_EL12);	return;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
	case AMAIR_EL1:		write_sysreg_s(val, amair_EL12);	return;
	case CNTKCTL_EL1:	write_sysreg_s(val, cntkctl_EL12);	return;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
	}

immediate_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
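
/*
 * Illustrative note, added for clarity and not part of the original file:
 * CCSIDR_EL1 is banked by CSSELR_EL1, so reading a particular cache's
 * geometry is a select-then-read sequence. For example, the level 1
 * data/unified cache would be read roughly as:
 *
 *	u32 l1d_ccsidr = get_ccsidr(0);	// CSSELR = 0: level 1, data/unified
 *
 * The IRQ-off window in get_ccsidr() keeps the select/read pair atomic with
 * respect to anything else on this CPU that might rewrite CSSELR_EL1.
 */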

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}
	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
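
/*
 * Illustrative worked example, added for clarity and not part of the
 * original file: for a 32-bit access to one half of a 64-bit VM register
 * pair, access_vm_reg() merges the written word into the existing 64-bit
 * value. Assuming the register currently holds 0xAAAAAAAABBBBBBBB and the
 * guest writes 0xCCCCCCCC:
 *
 *	odd r->reg  (high half): new value = 0xCCCCCCCCBBBBBBBB
 *	even r->reg (low half):  new value = 0xAAAAAAAACCCCCCCC
 */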

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->is_aarch32) {
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
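
/*
 * Illustrative sketch of the flow described above, added for clarity. The
 * real logic lives in the debug setup and world-switch code, not in this
 * file, and the helper name debug_in_use() is a placeholder:
 *
 *	// on guest entry
 *	if ((vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY) || debug_in_use(mdscr))
 *		// disable traps, save host regs, restore guest regs
 *	else
 *		// enable traps; the first access lands in trap_debug_regs()
 *
 *	// on guest exit
 *	if (vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
 *		// save guest regs, restore host regs, clear the dirty bit
 */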
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
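
/*
 * Illustrative worked example for reg_to_dbg(), added for clarity and not
 * part of the original file: a 32-bit guest write of 0xCCCCCCCC to a debug
 * register currently holding 0xAAAAAAAABBBBBBBB gives
 *
 *	val  = 0xCCCCCCCC & 0xffffffffUL;		// the written low half
 *	val |= (0xAAAAAAAABBBBBBBBUL >> 32) << 32;	// keep the old top half
 *	// *dbg_reg becomes 0xAAAAAAAACCCCCCCC
 *
 * A 64-bit write simply replaces the whole register.
 */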

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
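
/*
 * Illustrative worked example, added for clarity and not part of the
 * original file, assuming MPIDR_LEVEL_SHIFT() resolves to 0/8/16 for the
 * first three levels: with vcpu_id = 0x1234, the affinity fields of the
 * guest's MPIDR_EL1 become
 *
 *	Aff0 =  0x1234        & 0x0f = 0x04
 *	Aff1 = (0x1234 >>  4) & 0xff = 0x23
 *	Aff2 = (0x1234 >> 12) & 0xff = 0x01
 *
 * and bit 31 (RES1) is always set, so the guest reads back
 * 0x80000000 | (0x01 << 16) | (0x23 << 8) | 0x04.
 */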

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
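
/*
 * Illustrative worked example, added for clarity and not part of the
 * original file, assuming ARMV8_PMU_PMCR_N_SHIFT == 11 and
 * ARMV8_PMU_PMCR_N_MASK == 0x1f: if the guest's PMCR_EL0 reads as
 * 0x41013000, then
 *
 *	N = (0x41013000 >> 11) & 0x1f = 6
 *
 * so event counter indices 0..5 plus ARMV8_PMU_CYCLE_IDX are valid, and
 * any other index gets an UNDEF injected by pmu_counter_idx_valid().
 */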

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
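
/*
 * Illustrative worked example, added for clarity and not part of the
 * original file: the PMEVCNTRn_EL0 encodings pack the counter index into
 * CRm[1:0] and Op2[2:0]. Assuming PMEVCNTR18_EL0 encodes as CRn = 14,
 * CRm = 10, Op2 = 2, the decode above yields
 *
 *	idx = ((10 & 3) << 3) | (2 & 7) = (2 << 3) | 2 = 18
 *
 * which is then validated against PMCR_EL0.N by pmu_counter_idx_valid().
 */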

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

static bool access_cntp_tval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	u64 now = kvm_phys_timer_read();
	u64 cval;

	if (p->is_write) {
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
				      p->regval + now);
	} else {
		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
		p->regval = cval - now;
	}

	return true;
}

static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);

	return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);

	return true;
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1) {
		if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
			kvm_debug("SVE unsupported for guests, suppressing\n");

		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	} else if (id == SYS_ID_AA64MMFR1_EL1) {
		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
			kvm_debug("LORegions unsupported for guests, suppressing\n");

		val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
	}

	return val;
}
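
/*
 * Illustrative worked example, added for clarity and not part of the
 * original file, assuming ID_AA64PFR0_SVE_SHIFT == 32: the statement
 *
 *	val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
 *
 * clears bits [35:32] of the sanitised ID_AA64PFR0_EL1 value, so the guest
 * sees SVE reported as not implemented regardless of host support. The
 * LORegions field of ID_AA64MMFR1_EL1 is hidden the same way.
 */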

/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, false);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, false);
}

static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, true);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, true);
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all, debug-related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OS Lock protocol. This should be revisited if we ever encounter
 * a more demanding guest...
 */
  995. static const struct sys_reg_desc sys_reg_descs[] = {
  996. { SYS_DESC(SYS_DC_ISW), access_dcsw },
  997. { SYS_DESC(SYS_DC_CSW), access_dcsw },
  998. { SYS_DESC(SYS_DC_CISW), access_dcsw },
  999. DBG_BCR_BVR_WCR_WVR_EL1(0),
  1000. DBG_BCR_BVR_WCR_WVR_EL1(1),
  1001. { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
  1002. { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
  1003. DBG_BCR_BVR_WCR_WVR_EL1(2),
  1004. DBG_BCR_BVR_WCR_WVR_EL1(3),
  1005. DBG_BCR_BVR_WCR_WVR_EL1(4),
  1006. DBG_BCR_BVR_WCR_WVR_EL1(5),
  1007. DBG_BCR_BVR_WCR_WVR_EL1(6),
  1008. DBG_BCR_BVR_WCR_WVR_EL1(7),
  1009. DBG_BCR_BVR_WCR_WVR_EL1(8),
  1010. DBG_BCR_BVR_WCR_WVR_EL1(9),
  1011. DBG_BCR_BVR_WCR_WVR_EL1(10),
  1012. DBG_BCR_BVR_WCR_WVR_EL1(11),
  1013. DBG_BCR_BVR_WCR_WVR_EL1(12),
  1014. DBG_BCR_BVR_WCR_WVR_EL1(13),
  1015. DBG_BCR_BVR_WCR_WVR_EL1(14),
  1016. DBG_BCR_BVR_WCR_WVR_EL1(15),
  1017. { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
  1018. { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
  1019. { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
  1020. { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
  1021. { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
  1022. { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
  1023. { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
  1024. { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
  1025. { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
  1026. { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
  1027. // DBGDTR[TR]X_EL0 share the same encoding
  1028. { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
  1029. { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
  1030. { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
  1031. /*
  1032. * ID regs: all ID_SANITISED() entries here must have corresponding
  1033. * entries in arm64_ftr_regs[].
  1034. */
  1035. /* AArch64 mappings of the AArch32 ID registers */
  1036. /* CRm=1 */
  1037. ID_SANITISED(ID_PFR0_EL1),
  1038. ID_SANITISED(ID_PFR1_EL1),
  1039. ID_SANITISED(ID_DFR0_EL1),
  1040. ID_HIDDEN(ID_AFR0_EL1),
  1041. ID_SANITISED(ID_MMFR0_EL1),
  1042. ID_SANITISED(ID_MMFR1_EL1),
  1043. ID_SANITISED(ID_MMFR2_EL1),
  1044. ID_SANITISED(ID_MMFR3_EL1),
  1045. /* CRm=2 */
  1046. ID_SANITISED(ID_ISAR0_EL1),
  1047. ID_SANITISED(ID_ISAR1_EL1),
  1048. ID_SANITISED(ID_ISAR2_EL1),
  1049. ID_SANITISED(ID_ISAR3_EL1),
  1050. ID_SANITISED(ID_ISAR4_EL1),
  1051. ID_SANITISED(ID_ISAR5_EL1),
  1052. ID_SANITISED(ID_MMFR4_EL1),
  1053. ID_UNALLOCATED(2,7),
  1054. /* CRm=3 */
  1055. ID_SANITISED(MVFR0_EL1),
  1056. ID_SANITISED(MVFR1_EL1),
  1057. ID_SANITISED(MVFR2_EL1),
  1058. ID_UNALLOCATED(3,3),
  1059. ID_UNALLOCATED(3,4),
  1060. ID_UNALLOCATED(3,5),
  1061. ID_UNALLOCATED(3,6),
  1062. ID_UNALLOCATED(3,7),
  1063. /* AArch64 ID registers */
  1064. /* CRm=4 */
  1065. ID_SANITISED(ID_AA64PFR0_EL1),
  1066. ID_SANITISED(ID_AA64PFR1_EL1),
  1067. ID_UNALLOCATED(4,2),
  1068. ID_UNALLOCATED(4,3),
  1069. ID_UNALLOCATED(4,4),
  1070. ID_UNALLOCATED(4,5),
  1071. ID_UNALLOCATED(4,6),
  1072. ID_UNALLOCATED(4,7),
  1073. /* CRm=5 */
  1074. ID_SANITISED(ID_AA64DFR0_EL1),
  1075. ID_SANITISED(ID_AA64DFR1_EL1),
  1076. ID_UNALLOCATED(5,2),
  1077. ID_UNALLOCATED(5,3),
  1078. ID_HIDDEN(ID_AA64AFR0_EL1),
  1079. ID_HIDDEN(ID_AA64AFR1_EL1),
  1080. ID_UNALLOCATED(5,6),
  1081. ID_UNALLOCATED(5,7),
  1082. /* CRm=6 */
  1083. ID_SANITISED(ID_AA64ISAR0_EL1),
  1084. ID_SANITISED(ID_AA64ISAR1_EL1),
  1085. ID_UNALLOCATED(6,2),
  1086. ID_UNALLOCATED(6,3),
  1087. ID_UNALLOCATED(6,4),
  1088. ID_UNALLOCATED(6,5),
  1089. ID_UNALLOCATED(6,6),
  1090. ID_UNALLOCATED(6,7),
  1091. /* CRm=7 */
  1092. ID_SANITISED(ID_AA64MMFR0_EL1),
  1093. ID_SANITISED(ID_AA64MMFR1_EL1),
  1094. ID_SANITISED(ID_AA64MMFR2_EL1),
  1095. ID_UNALLOCATED(7,3),
  1096. ID_UNALLOCATED(7,4),
  1097. ID_UNALLOCATED(7,5),
  1098. ID_UNALLOCATED(7,6),
  1099. ID_UNALLOCATED(7,7),
  1100. { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
  1101. { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
  1102. { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
  1103. { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
  1104. { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
  1105. { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
  1106. { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
  1107. { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
  1108. { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
  1109. { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
  1110. { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
  1111. { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
  1112. { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
  1113. { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
  1114. { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
  1115. { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
  1116. { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
  1117. { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
  1118. { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
  1119. { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
  1120. { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
  1121. { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
  1122. { SYS_DESC(SYS_LORSA_EL1), trap_undef },
  1123. { SYS_DESC(SYS_LOREA_EL1), trap_undef },
  1124. { SYS_DESC(SYS_LORN_EL1), trap_undef },
  1125. { SYS_DESC(SYS_LORC_EL1), trap_undef },
  1126. { SYS_DESC(SYS_LORID_EL1), trap_undef },
  1127. { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
  1128. { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
  1129. { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
  1130. { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
  1131. { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
  1132. { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
  1133. { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
  1134. { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
  1135. { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
  1136. { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
  1137. { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
  1138. { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
  1139. { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
  1140. { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
  1141. { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
  1142. { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
  1143. { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
  1144. { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
  1145. { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
  1146. { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
};
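
/*
 * trap_dbgidr() synthesises a DBGIDR value for AArch32 guests from the
 * sanitised ID_AA64DFR0_EL1 and ID_AA64PFR0_EL1 views: the number of
 * watchpoints, breakpoints and context-matching comparators, a fixed
 * debug architecture version field, and whether EL3 is implemented.
 * Writes are ignored.
 */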
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
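
/*
 * trap_debug32() backs the AArch32 debug control registers with the
 * vcpu's cp14 shadow state; writes also mark the guest debug state as
 * dirty so the hardware registers get resynchronised on world switch.
 */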
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */
static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),
	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),
	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
	/* CNTP_TVAL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
	/* CNTP_CTL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },
	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}
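
/*
 * The trap tables are kept sorted on a single packed key so that bsearch()
 * can be used to look up an access: Op0, Op1, CRn, CRm and Op2 are folded
 * into one integer, most significant field first.
 */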
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	 })

static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: pointer to the global (arch-wide) trap descriptor table
 * @nr_global: number of entries in @global
 * @target_specific: pointer to the target-specific trap descriptor table
 * @nr_specific: number of entries in @target_specific
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;
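
	/*
	 * Decode the access from the HSR ISS: CRm, Op1 and the direction
	 * bit. A 64-bit coprocessor access has no Op0, Op2 or CRn.
	 */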
	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: pointer to the global (arch-wide) trap descriptor table
 * @nr_global: number of entries in @global
 * @target_specific: pointer to the target-specific trap descriptor table
 * @nr_specific: number of entries in @target_specific
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
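
	/*
	 * Decode the 32-bit coprocessor access from the HSR ISS:
	 * CRn, CRm, Op1, Op2 and the direction bit.
	 */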
	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
		kvm_inject_undefined(vcpu);
	}
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);
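
	/* Decode Op0, Op1, CRn, CRm, Op2 and the direction from the ESR ISS. */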
	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/
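
/*
 * index_to_params() decodes a KVM_REG_ARM64_SYSREG register index (as used
 * with the KVM_{GET,SET}_ONE_REG ioctls) back into Op0/Op1/CRn/CRm/Op2,
 * rejecting indices with an unexpected size or stray bits.
 */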
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg_by_id(id, &params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0)
			err = walk_one_sys_reg(i1, &uind, &total);
		else
			err = walk_one_sys_reg(i2, &uind, &total);

		if (err)
			return err;

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset __vcpu_sys_reg(%zi)", num);
}