hyp.S

/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x)
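
/*
 * A rough sketch (for orientation only; the authoritative layout lives in
 * asm/kvm_host.h and the offsets are generated by asm-offsets.c) of the
 * structure these offsets index into:
 *
 *     struct kvm_cpu_context {
 *         struct kvm_regs gp_regs;      // user_pt_regs (x0-x30, sp, pc,
 *                                       // pstate), sp_el1, elr_el1,
 *                                       // spsr[], FP/SIMD state
 *         u64 sys_regs[NR_SYS_REGS];    // indexed by the defines from
 *                                       // asm/kvm_asm.h
 *     };
 *
 * so CPU_XREG_OFFSET(n) addresses gp_regs.regs.regs[n] and
 * CPU_SYSREG_OFFSET(r) addresses sys_regs[r].
 */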

.text
.pushsection .hyp.text, "ax"
.align PAGE_SHIFT

.macro save_common_regs
// x2: base address for cpu context
// x3: tmp register
add x3, x2, #CPU_XREG_OFFSET(19)
stp x19, x20, [x3]
stp x21, x22, [x3, #16]
stp x23, x24, [x3, #32]
stp x25, x26, [x3, #48]
stp x27, x28, [x3, #64]
stp x29, lr, [x3, #80]
mrs x19, sp_el0
mrs x20, elr_el2 // EL1 PC
mrs x21, spsr_el2 // EL1 pstate
stp x19, x20, [x3, #96]
str x21, [x3, #112]
mrs x22, sp_el1
mrs x23, elr_el1
mrs x24, spsr_el1
str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
// x2: base address for cpu context
// x3: tmp register
ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
msr sp_el1, x22
msr elr_el1, x23
msr spsr_el1, x24
add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0
ldp x19, x20, [x3]
ldr x21, [x3, #16]
msr sp_el0, x19
msr elr_el2, x20 // EL1 PC
msr spsr_el2, x21 // EL1 pstate
add x3, x2, #CPU_XREG_OFFSET(19)
ldp x19, x20, [x3]
ldp x21, x22, [x3, #16]
ldp x23, x24, [x3, #32]
ldp x25, x26, [x3, #48]
ldp x27, x28, [x3, #64]
ldp x29, lr, [x3, #80]
.endm

.macro save_host_regs
save_common_regs
.endm

.macro restore_host_regs
restore_common_regs
.endm

.macro save_fpsimd
// x2: cpu context address
// x3, x4: tmp regs
add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
fpsimd_save x3, 4
.endm

.macro restore_fpsimd
// x2: cpu context address
// x3, x4: tmp regs
add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
fpsimd_restore x3, 4
.endm

.macro save_guest_regs
// x0 is the vcpu address
// x1 is the return code, do not corrupt!
// x2 is the cpu context
// x3 is a tmp register
// Guest's x0-x3 are on the stack
// Compute base to save registers
add x3, x2, #CPU_XREG_OFFSET(4)
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
stp x8, x9, [x3, #32]
stp x10, x11, [x3, #48]
stp x12, x13, [x3, #64]
stp x14, x15, [x3, #80]
stp x16, x17, [x3, #96]
str x18, [x3, #112]
pop x6, x7 // x2, x3
pop x4, x5 // x0, x1
add x3, x2, #CPU_XREG_OFFSET(0)
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
save_common_regs
.endm

.macro restore_guest_regs
// x0 is the vcpu address.
// x2 is the cpu context
// x3 is a tmp register
// Prepare x0-x3 for later restore
add x3, x2, #CPU_XREG_OFFSET(0)
ldp x4, x5, [x3]
ldp x6, x7, [x3, #16]
push x4, x5 // Push x0-x3 on the stack
push x6, x7
// x4-x18
ldp x4, x5, [x3, #32]
ldp x6, x7, [x3, #48]
ldp x8, x9, [x3, #64]
ldp x10, x11, [x3, #80]
ldp x12, x13, [x3, #96]
ldp x14, x15, [x3, #112]
ldp x16, x17, [x3, #128]
ldr x18, [x3, #144]
// x19-x29, lr, sp*, elr*, spsr*
restore_common_regs
// Last bits of the 64bit state
pop x2, x3
pop x0, x1
// Do not touch any register after this!
.endm

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
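// As an illustration, the C side reaches the very same slots through the
// vcpu_sys_reg() accessor (assuming its usual kvm_host.h definition):
//
//     vcpu_sys_reg(vcpu, MDSCR_EL1) == vcpu->arch.ctxt.sys_regs[MDSCR_EL1]
//
// so the mrs/stp order below must follow the index order declared in
// kvm_asm.h exactly, or registers would land in the wrong slots.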
.macro save_sysregs
// x2: base address for cpu context
// x3: tmp register
add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
mrs x4, vmpidr_el2
mrs x5, csselr_el1
mrs x6, sctlr_el1
mrs x7, actlr_el1
mrs x8, cpacr_el1
mrs x9, ttbr0_el1
mrs x10, ttbr1_el1
mrs x11, tcr_el1
mrs x12, esr_el1
mrs x13, afsr0_el1
mrs x14, afsr1_el1
mrs x15, far_el1
mrs x16, mair_el1
mrs x17, vbar_el1
mrs x18, contextidr_el1
mrs x19, tpidr_el0
mrs x20, tpidrro_el0
mrs x21, tpidr_el1
mrs x22, amair_el1
mrs x23, cntkctl_el1
mrs x24, par_el1
mrs x25, mdscr_el1
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
stp x8, x9, [x3, #32]
stp x10, x11, [x3, #48]
stp x12, x13, [x3, #64]
stp x14, x15, [x3, #80]
stp x16, x17, [x3, #96]
stp x18, x19, [x3, #112]
stp x20, x21, [x3, #128]
stp x22, x23, [x3, #144]
stp x24, x25, [x3, #160]
.endm

.macro save_debug
// x2: base address for cpu context
// x3: tmp register
mrs x26, id_aa64dfr0_el1
ubfx x24, x26, #12, #4 // Extract BRPs
ubfx x25, x26, #20, #4 // Extract WRPs
mov w26, #15
sub w24, w26, w24 // How many BPs to skip
sub w25, w26, w25 // How many WPs to skip
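// Worked example: a CPU with 6 breakpoints reports BRPs = 5 in
// ID_AA64DFR0_EL1 (the field encodes "number of breakpoints minus one"),
// so x24 = 15 - 5 = 10. Each "adr 1f; add; br" sequence below then jumps
// 10 instructions past its 1: label, executing only the accesses for
// dbgbcr5_el1..dbgbcr0_el1 and never touching unimplemented registers.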
add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
adr x26, 1f
add x26, x26, x24, lsl #2
br x26
1:
mrs x20, dbgbcr15_el1
mrs x19, dbgbcr14_el1
mrs x18, dbgbcr13_el1
mrs x17, dbgbcr12_el1
mrs x16, dbgbcr11_el1
mrs x15, dbgbcr10_el1
mrs x14, dbgbcr9_el1
mrs x13, dbgbcr8_el1
mrs x12, dbgbcr7_el1
mrs x11, dbgbcr6_el1
mrs x10, dbgbcr5_el1
mrs x9, dbgbcr4_el1
mrs x8, dbgbcr3_el1
mrs x7, dbgbcr2_el1
mrs x6, dbgbcr1_el1
mrs x5, dbgbcr0_el1
adr x26, 1f
add x26, x26, x24, lsl #2
br x26
1:
str x20, [x3, #(15 * 8)]
str x19, [x3, #(14 * 8)]
str x18, [x3, #(13 * 8)]
str x17, [x3, #(12 * 8)]
str x16, [x3, #(11 * 8)]
str x15, [x3, #(10 * 8)]
str x14, [x3, #(9 * 8)]
str x13, [x3, #(8 * 8)]
str x12, [x3, #(7 * 8)]
str x11, [x3, #(6 * 8)]
str x10, [x3, #(5 * 8)]
str x9, [x3, #(4 * 8)]
str x8, [x3, #(3 * 8)]
str x7, [x3, #(2 * 8)]
str x6, [x3, #(1 * 8)]
str x5, [x3, #(0 * 8)]

add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
adr x26, 1f
add x26, x26, x24, lsl #2
br x26
1:
mrs x20, dbgbvr15_el1
mrs x19, dbgbvr14_el1
mrs x18, dbgbvr13_el1
mrs x17, dbgbvr12_el1
mrs x16, dbgbvr11_el1
mrs x15, dbgbvr10_el1
mrs x14, dbgbvr9_el1
mrs x13, dbgbvr8_el1
mrs x12, dbgbvr7_el1
mrs x11, dbgbvr6_el1
mrs x10, dbgbvr5_el1
mrs x9, dbgbvr4_el1
mrs x8, dbgbvr3_el1
mrs x7, dbgbvr2_el1
mrs x6, dbgbvr1_el1
mrs x5, dbgbvr0_el1
adr x26, 1f
add x26, x26, x24, lsl #2
br x26
1:
str x20, [x3, #(15 * 8)]
str x19, [x3, #(14 * 8)]
str x18, [x3, #(13 * 8)]
str x17, [x3, #(12 * 8)]
str x16, [x3, #(11 * 8)]
str x15, [x3, #(10 * 8)]
str x14, [x3, #(9 * 8)]
str x13, [x3, #(8 * 8)]
str x12, [x3, #(7 * 8)]
str x11, [x3, #(6 * 8)]
str x10, [x3, #(5 * 8)]
str x9, [x3, #(4 * 8)]
str x8, [x3, #(3 * 8)]
str x7, [x3, #(2 * 8)]
str x6, [x3, #(1 * 8)]
str x5, [x3, #(0 * 8)]

add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
adr x26, 1f
add x26, x26, x25, lsl #2
br x26
1:
mrs x20, dbgwcr15_el1
mrs x19, dbgwcr14_el1
mrs x18, dbgwcr13_el1
mrs x17, dbgwcr12_el1
mrs x16, dbgwcr11_el1
mrs x15, dbgwcr10_el1
mrs x14, dbgwcr9_el1
mrs x13, dbgwcr8_el1
mrs x12, dbgwcr7_el1
mrs x11, dbgwcr6_el1
mrs x10, dbgwcr5_el1
mrs x9, dbgwcr4_el1
mrs x8, dbgwcr3_el1
mrs x7, dbgwcr2_el1
mrs x6, dbgwcr1_el1
mrs x5, dbgwcr0_el1
adr x26, 1f
add x26, x26, x25, lsl #2
br x26
1:
str x20, [x3, #(15 * 8)]
str x19, [x3, #(14 * 8)]
str x18, [x3, #(13 * 8)]
str x17, [x3, #(12 * 8)]
str x16, [x3, #(11 * 8)]
str x15, [x3, #(10 * 8)]
str x14, [x3, #(9 * 8)]
str x13, [x3, #(8 * 8)]
str x12, [x3, #(7 * 8)]
str x11, [x3, #(6 * 8)]
str x10, [x3, #(5 * 8)]
str x9, [x3, #(4 * 8)]
str x8, [x3, #(3 * 8)]
str x7, [x3, #(2 * 8)]
str x6, [x3, #(1 * 8)]
str x5, [x3, #(0 * 8)]

add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
adr x26, 1f
add x26, x26, x25, lsl #2
br x26
1:
mrs x20, dbgwvr15_el1
mrs x19, dbgwvr14_el1
mrs x18, dbgwvr13_el1
mrs x17, dbgwvr12_el1
mrs x16, dbgwvr11_el1
mrs x15, dbgwvr10_el1
mrs x14, dbgwvr9_el1
mrs x13, dbgwvr8_el1
mrs x12, dbgwvr7_el1
mrs x11, dbgwvr6_el1
mrs x10, dbgwvr5_el1
mrs x9, dbgwvr4_el1
mrs x8, dbgwvr3_el1
mrs x7, dbgwvr2_el1
mrs x6, dbgwvr1_el1
mrs x5, dbgwvr0_el1
adr x26, 1f
add x26, x26, x25, lsl #2
br x26
1:
str x20, [x3, #(15 * 8)]
str x19, [x3, #(14 * 8)]
str x18, [x3, #(13 * 8)]
str x17, [x3, #(12 * 8)]
str x16, [x3, #(11 * 8)]
str x15, [x3, #(10 * 8)]
str x14, [x3, #(9 * 8)]
str x13, [x3, #(8 * 8)]
str x12, [x3, #(7 * 8)]
str x11, [x3, #(6 * 8)]
str x10, [x3, #(5 * 8)]
str x9, [x3, #(4 * 8)]
str x8, [x3, #(3 * 8)]
str x7, [x3, #(2 * 8)]
str x6, [x3, #(1 * 8)]
str x5, [x3, #(0 * 8)]

mrs x21, mdccint_el1
str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
.endm

.macro restore_sysregs
// x2: base address for cpu context
// x3: tmp register
add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
ldp x4, x5, [x3]
ldp x6, x7, [x3, #16]
ldp x8, x9, [x3, #32]
ldp x10, x11, [x3, #48]
ldp x12, x13, [x3, #64]
ldp x14, x15, [x3, #80]
ldp x16, x17, [x3, #96]
ldp x18, x19, [x3, #112]
ldp x20, x21, [x3, #128]
ldp x22, x23, [x3, #144]
ldp x24, x25, [x3, #160]
msr vmpidr_el2, x4
msr csselr_el1, x5
msr sctlr_el1, x6
msr actlr_el1, x7
msr cpacr_el1, x8
msr ttbr0_el1, x9
msr ttbr1_el1, x10
msr tcr_el1, x11
msr esr_el1, x12
msr afsr0_el1, x13
msr afsr1_el1, x14
msr far_el1, x15
msr mair_el1, x16
msr vbar_el1, x17
msr contextidr_el1, x18
msr tpidr_el0, x19
msr tpidrro_el0, x20
msr tpidr_el1, x21
msr amair_el1, x22
msr cntkctl_el1, x23
msr par_el1, x24
msr mdscr_el1, x25
.endm

.macro restore_debug
// x2: base address for cpu context
// x3: tmp register
mrs x26, id_aa64dfr0_el1
ubfx x24, x26, #12, #4 // Extract BRPs
ubfx x25, x26, #20, #4 // Extract WRPs
mov w26, #15
sub w24, w26, w24 // How many BPs to skip
sub w25, w26, w25 // How many WPs to skip
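// Same linear-branch trick as in save_debug above: skip the slots for
// breakpoints/watchpoints this CPU does not implement.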
add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
adr x26, 1f
add x26, x26, x24, lsl #2
br x26
1:
ldr x20, [x3, #(15 * 8)]
ldr x19, [x3, #(14 * 8)]
ldr x18, [x3, #(13 * 8)]
ldr x17, [x3, #(12 * 8)]
ldr x16, [x3, #(11 * 8)]
ldr x15, [x3, #(10 * 8)]
ldr x14, [x3, #(9 * 8)]
ldr x13, [x3, #(8 * 8)]
ldr x12, [x3, #(7 * 8)]
ldr x11, [x3, #(6 * 8)]
ldr x10, [x3, #(5 * 8)]
ldr x9, [x3, #(4 * 8)]
ldr x8, [x3, #(3 * 8)]
ldr x7, [x3, #(2 * 8)]
ldr x6, [x3, #(1 * 8)]
ldr x5, [x3, #(0 * 8)]
adr x26, 1f
add x26, x26, x24, lsl #2
br x26
1:
msr dbgbcr15_el1, x20
msr dbgbcr14_el1, x19
msr dbgbcr13_el1, x18
msr dbgbcr12_el1, x17
msr dbgbcr11_el1, x16
msr dbgbcr10_el1, x15
msr dbgbcr9_el1, x14
msr dbgbcr8_el1, x13
msr dbgbcr7_el1, x12
msr dbgbcr6_el1, x11
msr dbgbcr5_el1, x10
msr dbgbcr4_el1, x9
msr dbgbcr3_el1, x8
msr dbgbcr2_el1, x7
msr dbgbcr1_el1, x6
msr dbgbcr0_el1, x5

add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
adr x26, 1f
add x26, x26, x24, lsl #2
br x26
1:
ldr x20, [x3, #(15 * 8)]
ldr x19, [x3, #(14 * 8)]
ldr x18, [x3, #(13 * 8)]
ldr x17, [x3, #(12 * 8)]
ldr x16, [x3, #(11 * 8)]
ldr x15, [x3, #(10 * 8)]
ldr x14, [x3, #(9 * 8)]
ldr x13, [x3, #(8 * 8)]
ldr x12, [x3, #(7 * 8)]
ldr x11, [x3, #(6 * 8)]
ldr x10, [x3, #(5 * 8)]
ldr x9, [x3, #(4 * 8)]
ldr x8, [x3, #(3 * 8)]
ldr x7, [x3, #(2 * 8)]
ldr x6, [x3, #(1 * 8)]
ldr x5, [x3, #(0 * 8)]
adr x26, 1f
add x26, x26, x24, lsl #2
br x26
1:
msr dbgbvr15_el1, x20
msr dbgbvr14_el1, x19
msr dbgbvr13_el1, x18
msr dbgbvr12_el1, x17
msr dbgbvr11_el1, x16
msr dbgbvr10_el1, x15
msr dbgbvr9_el1, x14
msr dbgbvr8_el1, x13
msr dbgbvr7_el1, x12
msr dbgbvr6_el1, x11
msr dbgbvr5_el1, x10
msr dbgbvr4_el1, x9
msr dbgbvr3_el1, x8
msr dbgbvr2_el1, x7
msr dbgbvr1_el1, x6
msr dbgbvr0_el1, x5

add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
adr x26, 1f
add x26, x26, x25, lsl #2
br x26
1:
ldr x20, [x3, #(15 * 8)]
ldr x19, [x3, #(14 * 8)]
ldr x18, [x3, #(13 * 8)]
ldr x17, [x3, #(12 * 8)]
ldr x16, [x3, #(11 * 8)]
ldr x15, [x3, #(10 * 8)]
ldr x14, [x3, #(9 * 8)]
ldr x13, [x3, #(8 * 8)]
ldr x12, [x3, #(7 * 8)]
ldr x11, [x3, #(6 * 8)]
ldr x10, [x3, #(5 * 8)]
ldr x9, [x3, #(4 * 8)]
ldr x8, [x3, #(3 * 8)]
ldr x7, [x3, #(2 * 8)]
ldr x6, [x3, #(1 * 8)]
ldr x5, [x3, #(0 * 8)]
adr x26, 1f
add x26, x26, x25, lsl #2
br x26
1:
msr dbgwcr15_el1, x20
msr dbgwcr14_el1, x19
msr dbgwcr13_el1, x18
msr dbgwcr12_el1, x17
msr dbgwcr11_el1, x16
msr dbgwcr10_el1, x15
msr dbgwcr9_el1, x14
msr dbgwcr8_el1, x13
msr dbgwcr7_el1, x12
msr dbgwcr6_el1, x11
msr dbgwcr5_el1, x10
msr dbgwcr4_el1, x9
msr dbgwcr3_el1, x8
msr dbgwcr2_el1, x7
msr dbgwcr1_el1, x6
msr dbgwcr0_el1, x5

add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
adr x26, 1f
add x26, x26, x25, lsl #2
br x26
1:
ldr x20, [x3, #(15 * 8)]
ldr x19, [x3, #(14 * 8)]
ldr x18, [x3, #(13 * 8)]
ldr x17, [x3, #(12 * 8)]
ldr x16, [x3, #(11 * 8)]
ldr x15, [x3, #(10 * 8)]
ldr x14, [x3, #(9 * 8)]
ldr x13, [x3, #(8 * 8)]
ldr x12, [x3, #(7 * 8)]
ldr x11, [x3, #(6 * 8)]
ldr x10, [x3, #(5 * 8)]
ldr x9, [x3, #(4 * 8)]
ldr x8, [x3, #(3 * 8)]
ldr x7, [x3, #(2 * 8)]
ldr x6, [x3, #(1 * 8)]
ldr x5, [x3, #(0 * 8)]
adr x26, 1f
add x26, x26, x25, lsl #2
br x26
1:
msr dbgwvr15_el1, x20
msr dbgwvr14_el1, x19
msr dbgwvr13_el1, x18
msr dbgwvr12_el1, x17
msr dbgwvr11_el1, x16
msr dbgwvr10_el1, x15
msr dbgwvr9_el1, x14
msr dbgwvr8_el1, x13
msr dbgwvr7_el1, x12
msr dbgwvr6_el1, x11
msr dbgwvr5_el1, x10
msr dbgwvr4_el1, x9
msr dbgwvr3_el1, x8
msr dbgwvr2_el1, x7
msr dbgwvr1_el1, x6
msr dbgwvr0_el1, x5

ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
msr mdccint_el1, x21
.endm

.macro skip_32bit_state tmp, target
// Skip 32bit state if not needed
mrs \tmp, hcr_el2
tbnz \tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
// Skip ThumbEE state if not needed
mrs \tmp, id_pfr0_el1
tbz \tmp, #12, \target
.endm

.macro skip_debug_state tmp, target
ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm

.macro compute_debug_state target
// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
// is set, we do a full save/restore cycle and disable trapping.
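// Roughly, in C (a sketch, assuming the usual kvm_host.h field names):
//
//     if (vcpu_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
//             vcpu->arch.debug_flags = KVM_ARM64_DEBUG_DIRTY;
//     else if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
//             goto target;    /* skip the debug save/restore */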
add x25, x0, #VCPU_CONTEXT
// Check the state of MDSCR_EL1
ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
and x26, x25, #DBG_MDSCR_KDE
and x25, x25, #DBG_MDSCR_MDE
adds xzr, x25, x26
b.eq 9998f // Nothing to see there
// If any interesting bits were set, we must set the flag
mov x26, #KVM_ARM64_DEBUG_DIRTY
str x26, [x0, #VCPU_DEBUG_FLAGS]
b 9999f // Don't skip restore
9998:
// Otherwise load the flags from memory in case we recently
// trapped
skip_debug_state x25, \target
9999:
.endm

.macro save_guest_32bit_state
skip_32bit_state x3, 1f
add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
mrs x4, spsr_abt
mrs x5, spsr_und
mrs x6, spsr_irq
mrs x7, spsr_fiq
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
mrs x4, dacr32_el2
mrs x5, ifsr32_el2
mrs x6, fpexc32_el2
stp x4, x5, [x3]
str x6, [x3, #16]
skip_debug_state x8, 2f
mrs x7, dbgvcr32_el2
str x7, [x3, #24]
2:
skip_tee_state x8, 1f
add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
mrs x4, teecr32_el1
mrs x5, teehbr32_el1
stp x4, x5, [x3]
1:
.endm

.macro restore_guest_32bit_state
skip_32bit_state x3, 1f
add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
ldp x4, x5, [x3]
ldp x6, x7, [x3, #16]
msr spsr_abt, x4
msr spsr_und, x5
msr spsr_irq, x6
msr spsr_fiq, x7
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
ldp x4, x5, [x3]
ldr x6, [x3, #16]
msr dacr32_el2, x4
msr ifsr32_el2, x5
msr fpexc32_el2, x6
skip_debug_state x8, 2f
ldr x7, [x3, #24]
msr dbgvcr32_el2, x7
2:
skip_tee_state x8, 1f
add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
ldp x4, x5, [x3]
msr teecr32_el1, x4
msr teehbr32_el1, x5
1:
.endm

.macro activate_traps
ldr x2, [x0, #VCPU_HCR_EL2]
msr hcr_el2, x2
ldr x2, =(CPTR_EL2_TTA)
msr cptr_el2, x2
ldr x2, =(1 << 15) // Trap CP15 Cr=15
msr hstr_el2, x2
mrs x2, mdcr_el2
and x2, x2, #MDCR_EL2_HPMN_MASK
orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
// Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
// if not dirty.
ldr x3, [x0, #VCPU_DEBUG_FLAGS]
tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
orr x2, x2, #MDCR_EL2_TDA
1:
msr mdcr_el2, x2
.endm

.macro deactivate_traps
mov x2, #HCR_RW
msr hcr_el2, x2
msr cptr_el2, xzr
msr hstr_el2, xzr
mrs x2, mdcr_el2
and x2, x2, #MDCR_EL2_HPMN_MASK
msr mdcr_el2, x2
.endm

.macro activate_vm
ldr x1, [x0, #VCPU_KVM]
kern_hyp_va x1
ldr x2, [x1, #KVM_VTTBR]
msr vttbr_el2, x2
.endm

.macro deactivate_vm
msr vttbr_el2, xzr
.endm

/*
 * Call into the vgic backend for state saving
 */
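/*
 * __vgic_sr_vectors (defined below) holds two function pointers filled in
 * by the host at init time, roughly (a sketch; see the host headers for
 * the authoritative definition):
 *
 *     struct vgic_sr_vectors {
 *         void *save_vgic;
 *         void *restore_vgic;
 *     };
 *
 * VGIC_SAVE_FN and VGIC_RESTORE_FN are the asm-offsets of those members,
 * so the appropriate vgic backend is picked at run time.
 */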
.macro save_vgic_state
adr x24, __vgic_sr_vectors
ldr x24, [x24, VGIC_SAVE_FN]
kern_hyp_va x24
blr x24
mrs x24, hcr_el2
mov x25, #HCR_INT_OVERRIDE
neg x25, x25
and x24, x24, x25
msr hcr_el2, x24
.endm

/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
mrs x24, hcr_el2
ldr x25, [x0, #VCPU_IRQ_LINES]
orr x24, x24, #HCR_INT_OVERRIDE
orr x24, x24, x25
msr hcr_el2, x24
adr x24, __vgic_sr_vectors
ldr x24, [x24, #VGIC_RESTORE_FN]
kern_hyp_va x24
blr x24
.endm

.macro save_timer_state
// x0: vcpu pointer
ldr x2, [x0, #VCPU_KVM]
kern_hyp_va x2
ldr w3, [x2, #KVM_TIMER_ENABLED]
cbz w3, 1f
mrs x3, cntv_ctl_el0
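// CNTV_CTL_EL0: bit 0 is ENABLE, bit 1 is IMASK, bit 2 is ISTATUS
// (read-only), so only the low two bits are worth saving below.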
and x3, x3, #3
str w3, [x0, #VCPU_TIMER_CNTV_CTL]
bic x3, x3, #1 // Clear Enable
msr cntv_ctl_el0, x3
isb
mrs x3, cntv_cval_el0
str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
1:
// Allow physical timer/counter access for the host
mrs x2, cnthctl_el2
orr x2, x2, #3
msr cnthctl_el2, x2
// Clear cntvoff for the host
msr cntvoff_el2, xzr
.endm

.macro restore_timer_state
// x0: vcpu pointer
// Disallow physical timer access for the guest
// Physical counter access is allowed
mrs x2, cnthctl_el2
orr x2, x2, #1
bic x2, x2, #2
msr cnthctl_el2, x2
ldr x2, [x0, #VCPU_KVM]
kern_hyp_va x2
ldr w3, [x2, #KVM_TIMER_ENABLED]
cbz w3, 1f
ldr x3, [x2, #KVM_TIMER_CNTVOFF]
msr cntvoff_el2, x3
ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL]
msr cntv_cval_el0, x2
isb
ldr w2, [x0, #VCPU_TIMER_CNTV_CTL]
and x2, x2, #3
msr cntv_ctl_el0, x2
1:
.endm

__save_sysregs:
save_sysregs
ret

__restore_sysregs:
restore_sysregs
ret

__save_debug:
save_debug
ret

__restore_debug:
restore_debug
ret

__save_fpsimd:
save_fpsimd
ret

__restore_fpsimd:
restore_fpsimd
ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
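/*
 * A sketch of how the host reaches this code (the actual call site is
 * kvm_arch_vcpu_ioctl_run() in the shared arm.c):
 *
 *     ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * kvm_call_hyp() issues an HVC that lands in el1_sync below, which then
 * branches here; the value returned is the exception code placed in x1
 * on the exit path.
 */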
ENTRY(__kvm_vcpu_run)
kern_hyp_va x0
msr tpidr_el2, x0 // Save the vcpu register

// Host context
ldr x2, [x0, #VCPU_HOST_CONTEXT]
kern_hyp_va x2
save_host_regs
bl __save_fpsimd
bl __save_sysregs
compute_debug_state 1f
bl __save_debug
1:
activate_traps
activate_vm
restore_vgic_state
restore_timer_state

// Guest context
add x2, x0, #VCPU_CONTEXT
bl __restore_sysregs
bl __restore_fpsimd
skip_debug_state x3, 1f
bl __restore_debug
1:
restore_guest_32bit_state
restore_guest_regs

// That's it, no more messing around.
eret

__kvm_vcpu_return:
// Assume x0 is the vcpu pointer, x1 the return code
// Guest's x0-x3 are on the stack

// Guest context
add x2, x0, #VCPU_CONTEXT
save_guest_regs
bl __save_fpsimd
bl __save_sysregs
skip_debug_state x3, 1f
bl __save_debug
1:
save_guest_32bit_state
save_timer_state
save_vgic_state
deactivate_traps
deactivate_vm

// Host context
ldr x2, [x0, #VCPU_HOST_CONTEXT]
kern_hyp_va x2
bl __restore_sysregs
bl __restore_fpsimd
skip_debug_state x3, 1f
// Clear the dirty flag for the next run, as all the state has
// already been saved. Note that we nuke the whole 64bit word.
// If we ever add more flags, we'll have to be more careful...
str xzr, [x0, #VCPU_DEBUG_FLAGS]
bl __restore_debug
1:
restore_host_regs
mov x0, x1
ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
dsb ishst
kern_hyp_va x0
ldr x2, [x0, #KVM_VTTBR]
msr vttbr_el2, x2
isb
/*
 * We could do so much better if we had the VA as well.
 * Instead, we invalidate Stage-2 for this IPA, and the
 * whole of Stage-1. Weep...
 */
tlbi ipas2e1is, x1
/*
 * We have to ensure completion of the invalidation at Stage-2,
 * since a table walk on another CPU could refill a TLB with a
 * complete (S1 + S2) walk based on the old Stage-2 mapping if
 * the Stage-1 invalidation happened first.
 */
dsb ish
tlbi vmalle1is
dsb ish
isb
msr vttbr_el2, xzr
ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

ENTRY(__kvm_flush_vm_context)
dsb ishst
tlbi alle1is
ic ialluis
dsb ish
ret
ENDPROC(__kvm_flush_vm_context)

// struct vgic_sr_vectors __vgic_sr_vectors;
.align 3
ENTRY(__vgic_sr_vectors)
.skip VGIC_SR_VECTOR_SZ
ENDPROC(__vgic_sr_vectors)

__kvm_hyp_panic:
// Guess the context by looking at VTTBR:
// If zero, we are already in the host context.
// Otherwise restore a minimal host context before panicking.
mrs x0, vttbr_el2
cbz x0, 1f
mrs x0, tpidr_el2
deactivate_traps
deactivate_vm
ldr x2, [x0, #VCPU_HOST_CONTEXT]
kern_hyp_va x2
bl __restore_sysregs

1: adr x0, __hyp_panic_str
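// x0 currently holds the HYP VA of the format string; rebase it into the
// kernel linear mapping (subtract HYP_PAGE_OFFSET, add PAGE_OFFSET, both
// loaded from the literals at 2: below) so that panic(), which runs back
// at EL1, can dereference it.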
adr x1, 2f
ldp x2, x3, [x1]
sub x0, x0, x2
add x0, x0, x3
mrs x1, spsr_el2
mrs x2, elr_el2
mrs x3, esr_el2
mrs x4, far_el2
mrs x5, hpfar_el2
mrs x6, par_el1
mrs x7, tpidr_el2
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
ldr lr, =panic
msr elr_el2, lr
eret

.align 3
2: .quad HYP_PAGE_OFFSET
.quad PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
.ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

.align 2

/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed). The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
 * passed in x0 and x1.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
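/*
 * Typical uses from the host side, as a sketch (the real call sites live
 * in the shared KVM/ARM code):
 *
 *     kvm_call_hyp(__kvm_flush_vm_context);
 *     kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *     ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * Each of these ends up in el1_sync below, which dereferences the
 * (HYP-remapped) function pointer.
 */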
ENTRY(kvm_call_hyp)
hvc #0
ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector label, target
.align 2
\label:
b \target
ENDPROC(\label)
.endm

/* None of these should ever happen */
invalid_vector el2t_sync_invalid, __kvm_hyp_panic
invalid_vector el2t_irq_invalid, __kvm_hyp_panic
invalid_vector el2t_fiq_invalid, __kvm_hyp_panic
invalid_vector el2t_error_invalid, __kvm_hyp_panic
invalid_vector el2h_sync_invalid, __kvm_hyp_panic
invalid_vector el2h_irq_invalid, __kvm_hyp_panic
invalid_vector el2h_fiq_invalid, __kvm_hyp_panic
invalid_vector el2h_error_invalid, __kvm_hyp_panic
invalid_vector el1_sync_invalid, __kvm_hyp_panic
invalid_vector el1_irq_invalid, __kvm_hyp_panic
invalid_vector el1_fiq_invalid, __kvm_hyp_panic
invalid_vector el1_error_invalid, __kvm_hyp_panic

el1_sync: // Guest trapped into EL2
push x0, x1
push x2, x3
mrs x1, esr_el2
lsr x2, x1, #ESR_EL2_EC_SHIFT
cmp x2, #ESR_EL2_EC_HVC64
b.ne el1_trap
mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
cbnz x3, el1_trap // called HVC

/* Here, we're pretty sure the host called HVC. */
pop x2, x3
pop x0, x1

/* Check for __hyp_get_vectors */
cbnz x0, 1f
mrs x0, vbar_el2
b 2f

1: push lr, xzr
/*
 * Compute the function address in EL2, and shuffle the parameters.
 */
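// x0 holds the kernel VA of the hyp function and x1-x3 the caller's (up to
// three) arguments; after the shuffle the callee sees them as x0-x2,
// matching the kvm_call_hyp() convention described above.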
kern_hyp_va x0
mov lr, x0
mov x0, x1
mov x1, x2
mov x2, x3
blr lr
pop lr, xzr
2: eret

el1_trap:
/*
 * x1: ESR
 * x2: ESR_EC
 */
cmp x2, #ESR_EL2_EC_DABT
mov x0, #ESR_EL2_EC_IABT
ccmp x2, x0, #4, ne
b.ne 1f // Not an abort we care about

/* This is an abort. Check for permission fault */
and x2, x1, #ESR_EL2_FSC_TYPE
cmp x2, #FSC_PERM
b.ne 1f // Not a permission fault

/*
 * Check for Stage-1 page table walk, which is guaranteed
 * to give a valid HPFAR_EL2.
 */
tbnz x1, #7, 1f // S1PTW is set

/* Preserve PAR_EL1 */
mrs x3, par_el1
push x3, xzr

/*
 * Permission fault, HPFAR_EL2 is invalid.
 * Resolve the IPA the hard way using the guest VA.
 * Stage-1 translation already validated the memory access rights.
 * As such, we can use the EL1 translation regime, and don't have
 * to distinguish between EL0 and EL1 access.
 */
mrs x2, far_el2
at s1e1r, x2
isb

/* Read result */
mrs x3, par_el1
pop x0, xzr // Restore PAR_EL1 from the stack
msr par_el1, x0
tbnz x3, #0, 3f // Bail out if we failed the translation
ubfx x3, x3, #12, #36 // Extract IPA
lsl x3, x3, #4 // and present it like HPFAR
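// PAR_EL1 returned the output PA in bits [47:12]; the ubfx/lsl pair above
// moves PA[47:12] into bits [39:4] of x3, i.e. the FIPA layout that
// HPFAR_EL2 would have provided for this fault.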
b 2f

1: mrs x3, hpfar_el2
mrs x2, far_el2

2: mrs x0, tpidr_el2
str w1, [x0, #VCPU_ESR_EL2]
str x2, [x0, #VCPU_FAR_EL2]
str x3, [x0, #VCPU_HPFAR_EL2]
mov x1, #ARM_EXCEPTION_TRAP
b __kvm_vcpu_return

/*
 * Translation failed. Just return to the guest and
 * let it fault again. Another CPU is probably playing
 * behind our back.
 */
3: pop x2, x3
pop x0, x1
eret

el1_irq:
push x0, x1
push x2, x3
mrs x0, tpidr_el2
mov x1, #ARM_EXCEPTION_IRQ
b __kvm_vcpu_return

.ltorg

.align 11

ENTRY(__kvm_hyp_vector)
ventry el2t_sync_invalid // Synchronous EL2t
ventry el2t_irq_invalid // IRQ EL2t
ventry el2t_fiq_invalid // FIQ EL2t
ventry el2t_error_invalid // Error EL2t
ventry el2h_sync_invalid // Synchronous EL2h
ventry el2h_irq_invalid // IRQ EL2h
ventry el2h_fiq_invalid // FIQ EL2h
ventry el2h_error_invalid // Error EL2h
ventry el1_sync // Synchronous 64-bit EL1
ventry el1_irq // IRQ 64-bit EL1
ventry el1_fiq_invalid // FIQ 64-bit EL1
ventry el1_error_invalid // Error 64-bit EL1
ventry el1_sync // Synchronous 32-bit EL1
ventry el1_irq // IRQ 32-bit EL1
ventry el1_fiq_invalid // FIQ 32-bit EL1
ventry el1_error_invalid // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

.popsection