book3s_hv_rmhandlers.S

  1. /*
  2. * This program is free software; you can redistribute it and/or modify
  3. * it under the terms of the GNU General Public License, version 2, as
  4. * published by the Free Software Foundation.
  5. *
  6. * This program is distributed in the hope that it will be useful,
  7. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9. * GNU General Public License for more details.
  10. *
  11. * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  12. *
  13. * Derived from book3s_rmhandlers.S and other files, which are:
  14. *
  15. * Copyright SUSE Linux Products GmbH 2009
  16. *
  17. * Authors: Alexander Graf <agraf@suse.de>
  18. */
  19. #include <asm/ppc_asm.h>
  20. #include <asm/kvm_asm.h>
  21. #include <asm/reg.h>
  22. #include <asm/mmu.h>
  23. #include <asm/page.h>
  24. #include <asm/ptrace.h>
  25. #include <asm/hvcall.h>
  26. #include <asm/asm-offsets.h>
  27. #include <asm/exception-64s.h>
  28. #include <asm/kvm_book3s_asm.h>
  29. #include <asm/book3s/64/mmu-hash.h>
  30. #include <asm/tm.h>
  31. #include <asm/opal.h>
  32. #include <asm/xive-regs.h>
  33. /* Sign-extend HDEC if not on POWER9 */
  34. #define EXTEND_HDEC(reg) \
  35. BEGIN_FTR_SECTION; \
  36. extsw reg, reg; \
  37. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
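/*
 * In C terms the macro is roughly (a sketch, using the cpu_has_feature()
 * test that the feature-section machinery patches in at boot):
 *	if (!cpu_has_feature(CPU_FTR_ARCH_300))
 *		hdec = (s64)(s32)hdec;
 * On arch >= v3.00 (POWER9) the HDEC is wider than 32 bits and already
 * usable as a signed 64-bit value, so no 32-bit sign extension is done.
 */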
  38. #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
  39. /* Values in HSTATE_NAPPING(r13) */
  40. #define NAPPING_CEDE 1
  41. #define NAPPING_NOVCPU 2
  42. /* Stack frame offsets for kvmppc_hv_entry */
  43. #define SFS 144
  44. #define STACK_SLOT_TRAP (SFS-4)
  45. #define STACK_SLOT_TID (SFS-16)
  46. #define STACK_SLOT_PSSCR (SFS-24)
  47. #define STACK_SLOT_PID (SFS-32)
  48. #define STACK_SLOT_IAMR (SFS-40)
  49. #define STACK_SLOT_CIABR (SFS-48)
  50. #define STACK_SLOT_DAWR (SFS-56)
  51. #define STACK_SLOT_DAWRX (SFS-64)
  52. /*
  53. * Call kvmppc_hv_entry in real mode.
  54. * Must be called with interrupts hard-disabled.
  55. *
  56. * Input Registers:
  57. *
  58. * LR = return address to continue at after eventually re-enabling MMU
  59. */
  60. _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
  61. mflr r0
  62. std r0, PPC_LR_STKOFF(r1)
  63. stdu r1, -112(r1)
  64. mfmsr r10
  65. LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
  66. li r0,MSR_RI
  67. andc r0,r10,r0
  68. li r6,MSR_IR | MSR_DR
  69. andc r6,r10,r6
  70. mtmsrd r0,1 /* clear RI in MSR */
  71. mtsrr0 r5
  72. mtsrr1 r6
  73. RFI
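/*
 * In effect, the trampoline above does:
 *	SRR0 = kvmppc_call_hv_entry
 *	SRR1 = MSR with IR/DR cleared (i.e. real mode)
 *	clear MSR_RI (an interrupt taken here would be unrecoverable)
 *	RFI, continuing at kvmppc_call_hv_entry with translation off
 */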
  74. kvmppc_call_hv_entry:
  75. ld r4, HSTATE_KVM_VCPU(r13)
  76. bl kvmppc_hv_entry
  77. /* Back from guest - restore host state and return to caller */
  78. BEGIN_FTR_SECTION
  79. /* Restore host DABR and DABRX */
  80. ld r5,HSTATE_DABR(r13)
  81. li r6,7
  82. mtspr SPRN_DABR,r5
  83. mtspr SPRN_DABRX,r6
  84. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  85. /* Restore SPRG3 */
  86. ld r3,PACA_SPRG_VDSO(r13)
  87. mtspr SPRN_SPRG_VDSO_WRITE,r3
  88. /* Reload the host's PMU registers */
  89. ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
  90. lbz r4, LPPACA_PMCINUSE(r3)
  91. cmpwi r4, 0
  92. beq 23f /* skip if not */
  93. BEGIN_FTR_SECTION
  94. ld r3, HSTATE_MMCR0(r13)
  95. andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
  96. cmpwi r4, MMCR0_PMAO
  97. beql kvmppc_fix_pmao
  98. END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
  99. lwz r3, HSTATE_PMC1(r13)
  100. lwz r4, HSTATE_PMC2(r13)
  101. lwz r5, HSTATE_PMC3(r13)
  102. lwz r6, HSTATE_PMC4(r13)
  103. lwz r8, HSTATE_PMC5(r13)
  104. lwz r9, HSTATE_PMC6(r13)
  105. mtspr SPRN_PMC1, r3
  106. mtspr SPRN_PMC2, r4
  107. mtspr SPRN_PMC3, r5
  108. mtspr SPRN_PMC4, r6
  109. mtspr SPRN_PMC5, r8
  110. mtspr SPRN_PMC6, r9
  111. ld r3, HSTATE_MMCR0(r13)
  112. ld r4, HSTATE_MMCR1(r13)
  113. ld r5, HSTATE_MMCRA(r13)
  114. ld r6, HSTATE_SIAR(r13)
  115. ld r7, HSTATE_SDAR(r13)
  116. mtspr SPRN_MMCR1, r4
  117. mtspr SPRN_MMCRA, r5
  118. mtspr SPRN_SIAR, r6
  119. mtspr SPRN_SDAR, r7
  120. BEGIN_FTR_SECTION
  121. ld r8, HSTATE_MMCR2(r13)
  122. ld r9, HSTATE_SIER(r13)
  123. mtspr SPRN_MMCR2, r8
  124. mtspr SPRN_SIER, r9
  125. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  126. mtspr SPRN_MMCR0, r3
  127. isync
  128. 23:
  129. /*
  130. * Reload DEC. HDEC interrupts were disabled when
  131. * we reloaded the host's LPCR value.
  132. */
  133. ld r3, HSTATE_DECEXP(r13)
  134. mftb r4
  135. subf r4, r4, r3
  136. mtspr SPRN_DEC, r4
  137. /* hwthread_req may have got set by cede or no vcpu, so clear it */
  138. li r0, 0
  139. stb r0, HSTATE_HWTHREAD_REQ(r13)
  140. /*
  141. * For external and machine check interrupts, we need
  142. * to call the Linux handler to process the interrupt.
  143. * We do that by jumping to absolute address 0x500 for
  144. * external interrupts, or the machine_check_fwnmi label
  145. * for machine checks (since firmware might have patched
  146. * the vector area at 0x200). The [h]rfid at the end of the
  147. * handler will return to the book3s_hv_interrupts.S code.
  148. * For other interrupts we do the rfid to get back
  149. * to the book3s_hv_interrupts.S code here.
  150. */
  151. ld r8, 112+PPC_LR_STKOFF(r1)
  152. addi r1, r1, 112
  153. ld r7, HSTATE_HOST_MSR(r13)
  154. /*
  155. * If we came back from the guest via a relocation-on interrupt,
  156. * we will be in virtual mode at this point, which makes it a
  157. * little easier to get back to the caller.
  158. */
  159. mfmsr r0
  160. andi. r0, r0, MSR_IR /* in real mode? */
  161. bne .Lvirt_return
  162. cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
  163. cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
  164. beq 11f
  165. cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
  166. beq 15f /* Invoke the H_DOORBELL handler */
  167. cmpwi cr2, r12, BOOK3S_INTERRUPT_HMI
  168. beq cr2, 14f /* HMI check */
  169. /* RFI into the highmem handler, or branch to interrupt handler */
  170. mfmsr r6
  171. li r0, MSR_RI
  172. andc r6, r6, r0
  173. mtmsrd r6, 1 /* Clear RI in MSR */
  174. mtsrr0 r8
  175. mtsrr1 r7
  176. beq cr1, 13f /* machine check */
  177. RFI
  178. /* On POWER7, we have external interrupts set to use HSRR0/1 */
  179. 11: mtspr SPRN_HSRR0, r8
  180. mtspr SPRN_HSRR1, r7
  181. ba 0x500
  182. 13: b machine_check_fwnmi
  183. 14: mtspr SPRN_HSRR0, r8
  184. mtspr SPRN_HSRR1, r7
  185. b hmi_exception_after_realmode
  186. 15: mtspr SPRN_HSRR0, r8
  187. mtspr SPRN_HSRR1, r7
  188. ba 0xe80
  189. /* Virtual-mode return - can't get here for HMI or machine check */
  190. .Lvirt_return:
  191. cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
  192. beq 16f
  193. cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
  194. beq 17f
  195. andi. r0, r7, MSR_EE /* were interrupts hard-enabled? */
  196. beq 18f
  197. mtmsrd r7, 1 /* if so then re-enable them */
  198. 18: mtlr r8
  199. blr
  200. 16: mtspr SPRN_HSRR0, r8 /* jump to reloc-on external vector */
  201. mtspr SPRN_HSRR1, r7
  202. b exc_virt_0x4500_hardware_interrupt
  203. 17: mtspr SPRN_HSRR0, r8
  204. mtspr SPRN_HSRR1, r7
  205. b exc_virt_0x4e80_h_doorbell
  206. kvmppc_primary_no_guest:
  207. /* We handle this much like a ceded vcpu */
  208. /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
  209. /* HDEC may be larger than DEC for arch >= v3.00, but since the */
  210. /* HDEC value came from DEC in the first place, it will fit */
  211. mfspr r3, SPRN_HDEC
  212. mtspr SPRN_DEC, r3
  213. /*
  214. * Make sure the primary has finished the MMU switch.
  215. * We should never get here on a secondary thread, but
  216. * check it for robustness' sake.
  217. */
  218. ld r5, HSTATE_KVM_VCORE(r13)
  219. 65: lbz r0, VCORE_IN_GUEST(r5)
  220. cmpwi r0, 0
  221. beq 65b
  222. /* Set LPCR. */
  223. ld r8,VCORE_LPCR(r5)
  224. mtspr SPRN_LPCR,r8
  225. isync
  226. /* set our bit in napping_threads */
  227. ld r5, HSTATE_KVM_VCORE(r13)
  228. lbz r7, HSTATE_PTID(r13)
  229. li r0, 1
  230. sld r0, r0, r7
  231. addi r6, r5, VCORE_NAPPING_THREADS
  232. 1: lwarx r3, 0, r6
  233. or r3, r3, r0
  234. stwcx. r3, 0, r6
  235. bne 1b
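/* i.e., atomically: vcore->napping_threads |= 1 << ptid */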
  236. /* order napping_threads update vs testing entry_exit_map */
  237. isync
  238. li r12, 0
  239. lwz r7, VCORE_ENTRY_EXIT(r5)
  240. cmpwi r7, 0x100
  241. bge kvm_novcpu_exit /* another thread already exiting */
  242. li r3, NAPPING_NOVCPU
  243. stb r3, HSTATE_NAPPING(r13)
  244. li r3, 0 /* Don't wake on privileged (OS) doorbell */
  245. b kvm_do_nap
  246. /*
  247. * kvm_novcpu_wakeup
  248. * Entered from kvm_start_guest if kvm_hstate.napping is set
  249. * to NAPPING_NOVCPU
  250. * r2 = kernel TOC
  251. * r13 = paca
  252. */
  253. kvm_novcpu_wakeup:
  254. ld r1, HSTATE_HOST_R1(r13)
  255. ld r5, HSTATE_KVM_VCORE(r13)
  256. li r0, 0
  257. stb r0, HSTATE_NAPPING(r13)
  258. /* check the wake reason */
  259. bl kvmppc_check_wake_reason
  260. /*
  261. * Restore volatile registers since we could have called
  262. * a C routine in kvmppc_check_wake_reason.
  263. * r5 = VCORE
  264. */
  265. ld r5, HSTATE_KVM_VCORE(r13)
  266. /* see if any other thread is already exiting */
  267. lwz r0, VCORE_ENTRY_EXIT(r5)
  268. cmpwi r0, 0x100
  269. bge kvm_novcpu_exit
  270. /* clear our bit in napping_threads */
  271. lbz r7, HSTATE_PTID(r13)
  272. li r0, 1
  273. sld r0, r0, r7
  274. addi r6, r5, VCORE_NAPPING_THREADS
  275. 4: lwarx r7, 0, r6
  276. andc r7, r7, r0
  277. stwcx. r7, 0, r6
  278. bne 4b
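/* i.e., atomically: vcore->napping_threads &= ~(1 << ptid) */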
  279. /* See if the wake reason means we need to exit */
  280. cmpdi r3, 0
  281. bge kvm_novcpu_exit
  282. /* See if our timeslice has expired (HDEC is negative) */
  283. mfspr r0, SPRN_HDEC
  284. EXTEND_HDEC(r0)
  285. li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
  286. cmpdi r0, 0
  287. blt kvm_novcpu_exit
  288. /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
  289. ld r4, HSTATE_KVM_VCPU(r13)
  290. cmpdi r4, 0
  291. beq kvmppc_primary_no_guest
  292. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  293. addi r3, r4, VCPU_TB_RMENTRY
  294. bl kvmhv_start_timing
  295. #endif
  296. b kvmppc_got_guest
  297. kvm_novcpu_exit:
  298. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  299. ld r4, HSTATE_KVM_VCPU(r13)
  300. cmpdi r4, 0
  301. beq 13f
  302. addi r3, r4, VCPU_TB_RMEXIT
  303. bl kvmhv_accumulate_time
  304. #endif
  305. 13: mr r3, r12
  306. stw r12, STACK_SLOT_TRAP(r1)
  307. bl kvmhv_commence_exit
  308. nop
  309. lwz r12, STACK_SLOT_TRAP(r1)
  310. b kvmhv_switch_to_host
  311. /*
  312. * We come in here when wakened from nap mode.
  313. * Relocation is off and most register values are lost.
  314. * r13 points to the PACA.
  315. */
  316. .globl kvm_start_guest
  317. kvm_start_guest:
  318. /* Set runlatch bit the minute you wake up from nap */
  319. mfspr r0, SPRN_CTRLF
  320. ori r0, r0, 1
  321. mtspr SPRN_CTRLT, r0
  322. ld r2,PACATOC(r13)
  323. li r0,KVM_HWTHREAD_IN_KVM
  324. stb r0,HSTATE_HWTHREAD_STATE(r13)
  325. /* NV GPR values from power7_idle() will no longer be valid */
  326. li r0,1
  327. stb r0,PACA_NAPSTATELOST(r13)
  328. /* were we napping due to cede? */
  329. lbz r0,HSTATE_NAPPING(r13)
  330. cmpwi r0,NAPPING_CEDE
  331. beq kvm_end_cede
  332. cmpwi r0,NAPPING_NOVCPU
  333. beq kvm_novcpu_wakeup
  334. ld r1,PACAEMERGSP(r13)
  335. subi r1,r1,STACK_FRAME_OVERHEAD
  336. /*
  337. * We weren't napping due to cede, so this must be a secondary
  338. * thread being woken up to run a guest, or being woken up due
  339. * to a stray IPI. (Or due to some machine check or hypervisor
  340. * maintenance interrupt while the core is in KVM.)
  341. */
  342. /* Check the wake reason in SRR1 to see why we got here */
  343. bl kvmppc_check_wake_reason
  344. /*
  345. * kvmppc_check_wake_reason could invoke a C routine, but we
  346. * have no volatile registers to restore when we return.
  347. */
  348. cmpdi r3, 0
  349. bge kvm_no_guest
  350. /* get vcore pointer, NULL if we have nothing to run */
  351. ld r5,HSTATE_KVM_VCORE(r13)
  352. cmpdi r5,0
  353. /* if we have no vcore to run, go back to sleep */
  354. beq kvm_no_guest
  355. kvm_secondary_got_guest:
  356. /* Set HSTATE_DSCR(r13) to something sensible */
  357. ld r6, PACA_DSCR_DEFAULT(r13)
  358. std r6, HSTATE_DSCR(r13)
  359. /* On thread 0 of a subcore, set HDEC to max */
  360. lbz r4, HSTATE_PTID(r13)
  361. cmpwi r4, 0
  362. bne 63f
  363. LOAD_REG_ADDR(r6, decrementer_max)
  364. ld r6, 0(r6)
  365. mtspr SPRN_HDEC, r6
  366. /* and set per-LPAR registers, if doing dynamic micro-threading */
  367. ld r6, HSTATE_SPLIT_MODE(r13)
  368. cmpdi r6, 0
  369. beq 63f
  370. ld r0, KVM_SPLIT_RPR(r6)
  371. mtspr SPRN_RPR, r0
  372. ld r0, KVM_SPLIT_PMMAR(r6)
  373. mtspr SPRN_PMMAR, r0
  374. ld r0, KVM_SPLIT_LDBAR(r6)
  375. mtspr SPRN_LDBAR, r0
  376. isync
  377. 63:
  378. /* Order load of vcpu after load of vcore */
  379. lwsync
  380. ld r4, HSTATE_KVM_VCPU(r13)
  381. bl kvmppc_hv_entry
  382. /* Back from the guest, go back to nap */
  383. /* Clear our vcpu and vcore pointers so we don't come back in early */
  384. li r0, 0
  385. std r0, HSTATE_KVM_VCPU(r13)
  386. /*
  387. * Once we clear HSTATE_KVM_VCORE(r13), the code in
  388. * kvmppc_run_core() is going to assume that all our vcpu
  389. * state is visible in memory. This lwsync makes sure
  390. * that that is true.
  391. */
  392. lwsync
  393. std r0, HSTATE_KVM_VCORE(r13)
  394. /*
  395. * All secondaries exiting the guest fall through this path.
  396. * Before proceeding, check for an HMI interrupt and, if there is
  397. * one, invoke the OPAL HMI handler. By now we can be sure that the
  398. * primary thread on this core/subcore has already done the partition
  399. * switch and TB resync, so it is safe to call the OPAL HMI handler.
  400. */
  401. cmpwi r12, BOOK3S_INTERRUPT_HMI
  402. bne kvm_no_guest
  403. li r3,0 /* NULL argument */
  404. bl hmi_exception_realmode
  405. /*
  406. * At this point we have finished executing in the guest.
  407. * We need to wait for hwthread_req to become zero, since
  408. * we may not turn on the MMU while hwthread_req is non-zero.
  409. * While waiting we also need to check whether we are given a vcpu to run.
  410. */
  411. kvm_no_guest:
  412. lbz r3, HSTATE_HWTHREAD_REQ(r13)
  413. cmpwi r3, 0
  414. bne 53f
  415. HMT_MEDIUM
  416. li r0, KVM_HWTHREAD_IN_KERNEL
  417. stb r0, HSTATE_HWTHREAD_STATE(r13)
  418. /* need to recheck hwthread_req after a barrier, to avoid race */
  419. sync
  420. lbz r3, HSTATE_HWTHREAD_REQ(r13)
  421. cmpwi r3, 0
  422. bne 54f
  423. /*
  424. * We jump to pnv_wakeup_loss, which will return to the caller
  425. * of power7_nap in the powernv cpu offline loop. The value we
  426. * put in r3 becomes the return value for power7_nap.
  427. */
  428. li r3, LPCR_PECE0
  429. mfspr r4, SPRN_LPCR
  430. rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
  431. mtspr SPRN_LPCR, r4
  432. li r3, 0
  433. b pnv_wakeup_loss
  434. 53: HMT_LOW
  435. ld r5, HSTATE_KVM_VCORE(r13)
  436. cmpdi r5, 0
  437. bne 60f
  438. ld r3, HSTATE_SPLIT_MODE(r13)
  439. cmpdi r3, 0
  440. beq kvm_no_guest
  441. lbz r0, KVM_SPLIT_DO_NAP(r3)
  442. cmpwi r0, 0
  443. beq kvm_no_guest
  444. HMT_MEDIUM
  445. b kvm_unsplit_nap
  446. 60: HMT_MEDIUM
  447. b kvm_secondary_got_guest
  448. 54: li r0, KVM_HWTHREAD_IN_KVM
  449. stb r0, HSTATE_HWTHREAD_STATE(r13)
  450. b kvm_no_guest
  451. /*
  452. * Here the primary thread is trying to return the core to
  453. * whole-core mode, so we need to nap.
  454. */
  455. kvm_unsplit_nap:
  456. /*
  457. * When secondaries are napping in kvm_unsplit_nap() with
  458. * hwthread_req = 1, an HMI is ignored even though the subcores have
  459. * already exited the guest. The HMI then keeps waking the secondaries
  460. * from nap in a loop, and they always go back to nap since no vcore
  461. * is assigned to them. This makes it impossible for the primary
  462. * thread to get hold of the secondary threads, resulting in a soft
  463. * lockup in the KVM path.
  464. *
  465. * So check whether an HMI is pending and handle it before napping.
  466. */
  467. cmpwi r12, BOOK3S_INTERRUPT_HMI
  468. bne 55f
  469. li r3, 0 /* NULL argument */
  470. bl hmi_exception_realmode
  471. 55:
  472. /*
  473. * Ensure that a secondary does not nap while it
  474. * has its vcore pointer set.
  475. */
  476. sync /* matches smp_mb() before setting split_info.do_nap */
  477. ld r0, HSTATE_KVM_VCORE(r13)
  478. cmpdi r0, 0
  479. bne kvm_no_guest
  480. /* clear any pending message */
  481. BEGIN_FTR_SECTION
  482. lis r6, (PPC_DBELL_SERVER << (63-36))@h
  483. PPC_MSGCLR(6)
  484. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  485. /* Set kvm_split_mode.napped[tid] = 1 */
  486. ld r3, HSTATE_SPLIT_MODE(r13)
  487. li r0, 1
  488. lhz r4, PACAPACAINDEX(r13)
  489. clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */
  490. addi r4, r4, KVM_SPLIT_NAPPED
  491. stbx r0, r3, r4
  492. /* Check the do_nap flag again after setting napped[] */
  493. sync
  494. lbz r0, KVM_SPLIT_DO_NAP(r3)
  495. cmpwi r0, 0
  496. beq 57f
  497. li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
  498. mfspr r5, SPRN_LPCR
  499. rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
  500. b kvm_nap_sequence
  501. 57: li r0, 0
  502. stbx r0, r3, r4
  503. b kvm_no_guest
  504. /******************************************************************************
  505. * *
  506. * Entry code *
  507. * *
  508. *****************************************************************************/
  509. .global kvmppc_hv_entry
  510. kvmppc_hv_entry:
  511. /* Required state:
  512. *
  513. * R4 = vcpu pointer (or NULL)
  514. * MSR = ~IR|DR
  515. * R13 = PACA
  516. * R1 = host R1
  517. * R2 = TOC
  518. * all other volatile GPRS = free
  519. * Does not preserve non-volatile GPRs or CR fields
  520. */
  521. mflr r0
  522. std r0, PPC_LR_STKOFF(r1)
  523. stdu r1, -SFS(r1)
  524. /* Save R1 in the PACA */
  525. std r1, HSTATE_HOST_R1(r13)
  526. li r6, KVM_GUEST_MODE_HOST_HV
  527. stb r6, HSTATE_IN_GUEST(r13)
  528. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  529. /* Store initial timestamp */
  530. cmpdi r4, 0
  531. beq 1f
  532. addi r3, r4, VCPU_TB_RMENTRY
  533. bl kvmhv_start_timing
  534. 1:
  535. #endif
  536. /* Use cr7 as an indication of radix mode */
  537. ld r5, HSTATE_KVM_VCORE(r13)
  538. ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
  539. lbz r0, KVM_RADIX(r9)
  540. cmpwi cr7, r0, 0
  541. /* Clear out SLB if hash */
  542. bne cr7, 2f
  543. li r6,0
  544. slbmte r6,r6
  545. slbia
  546. ptesync
  547. 2:
  548. /*
  549. * POWER7/POWER8 host -> guest partition switch code.
  550. * We don't have to lock against concurrent tlbies,
  551. * but we do have to coordinate across hardware threads.
  552. */
  553. /* Set bit in entry map iff exit map is zero. */
  554. li r7, 1
  555. lbz r6, HSTATE_PTID(r13)
  556. sld r7, r7, r6
  557. addi r8, r5, VCORE_ENTRY_EXIT
  558. 21: lwarx r3, 0, r8
  559. cmpwi r3, 0x100 /* any threads starting to exit? */
  560. bge secondary_too_late /* if so we're too late to the party */
  561. or r3, r3, r7
  562. stwcx. r3, 0, r8
  563. bne 21b
  564. /* Primary thread switches to guest partition. */
  565. cmpwi r6,0
  566. bne 10f
  567. lwz r7,KVM_LPID(r9)
  568. BEGIN_FTR_SECTION
  569. ld r6,KVM_SDR1(r9)
  570. li r0,LPID_RSVD /* switch to reserved LPID */
  571. mtspr SPRN_LPID,r0
  572. ptesync
  573. mtspr SPRN_SDR1,r6 /* switch to partition page table */
  574. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
  575. mtspr SPRN_LPID,r7
  576. isync
  577. /* See if we need to flush the TLB */
  578. lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
  579. BEGIN_FTR_SECTION
  580. /*
  581. * On POWER9, individual threads can come in here, but the
  582. * TLB is shared between the 4 threads in a core, hence
  583. * invalidating on one thread invalidates for all.
  584. * Thus we make all 4 threads use the same bit here.
  585. */
  586. clrrdi r6,r6,2
  587. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  588. clrldi r7,r6,64-6 /* extract bit number (6 bits) */
  589. srdi r6,r6,6 /* doubleword number */
  590. sldi r6,r6,3 /* address offset */
  591. add r6,r6,r9
  592. addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
  593. li r8,1
  594. sld r8,r8,r7
  595. ld r7,0(r6)
  596. and. r7,r7,r8
  597. beq 22f
  598. /* Flush the TLB of any entries for this LPID */
  599. lwz r0,KVM_TLB_SETS(r9)
  600. mtctr r0
  601. li r7,0x800 /* IS field = 0b10 */
  602. ptesync
  603. li r0,0 /* RS for P9 version of tlbiel */
  604. bne cr7, 29f
  605. 28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */
  606. addi r7,r7,0x1000
  607. bdnz 28b
  608. b 30f
  609. 29: PPC_TLBIEL(7,0,2,1,1) /* for radix, RIC=2, PRS=1, R=1 */
  610. addi r7,r7,0x1000
  611. bdnz 29b
  612. 30: ptesync
  613. 23: ldarx r7,0,r6 /* clear the bit after TLB flushed */
  614. andc r7,r7,r8
  615. stdcx. r7,0,r6
  616. bne 23b
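/*
 * The sequence above is roughly:
 *	if (test_bit(cpu, kvm->arch.need_tlb_flush)) {
 *		tlbiel every TLB set for this LPID;
 *		ptesync;
 *		clear_bit(cpu, kvm->arch.need_tlb_flush);
 *	}
 * where, on POWER9, 'cpu' is reduced to the first thread of the core
 * since the TLB there is shared by all threads of the core.
 */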
  617. /* Add timebase offset onto timebase */
  618. 22: ld r8,VCORE_TB_OFFSET(r5)
  619. cmpdi r8,0
  620. beq 37f
  621. mftb r6 /* current host timebase */
  622. add r8,r8,r6
  623. mtspr SPRN_TBU40,r8 /* update upper 40 bits */
  624. mftb r7 /* check if lower 24 bits overflowed */
  625. clrldi r6,r6,40
  626. clrldi r7,r7,40
  627. cmpld r7,r6
  628. bge 37f
  629. addis r8,r8,0x100 /* if so, increment upper 40 bits */
  630. mtspr SPRN_TBU40,r8
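/*
 * mtspr TBU40 replaces only the upper 40 bits of the timebase while the
 * low 24 bits keep running.  If the low 24 bits wrapped between the two
 * mftb reads (detected by the cmpld/bge test above), the carry into
 * bit 24 is missing from the value just written, so the upper 40 bits
 * are bumped by one via the addis of 0x100 (i.e. 1 << 24).
 */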
  631. /* Load guest PCR value to select appropriate compat mode */
  632. 37: ld r7, VCORE_PCR(r5)
  633. cmpdi r7, 0
  634. beq 38f
  635. mtspr SPRN_PCR, r7
  636. 38:
  637. BEGIN_FTR_SECTION
  638. /* DPDES and VTB are shared between threads */
  639. ld r8, VCORE_DPDES(r5)
  640. ld r7, VCORE_VTB(r5)
  641. mtspr SPRN_DPDES, r8
  642. mtspr SPRN_VTB, r7
  643. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  644. /* Mark the subcore state as inside guest */
  645. bl kvmppc_subcore_enter_guest
  646. nop
  647. ld r5, HSTATE_KVM_VCORE(r13)
  648. ld r4, HSTATE_KVM_VCPU(r13)
  649. li r0,1
  650. stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
  651. /* Do we have a guest vcpu to run? */
  652. 10: cmpdi r4, 0
  653. beq kvmppc_primary_no_guest
  654. kvmppc_got_guest:
  655. /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
  656. lwz r5,VCPU_SLB_MAX(r4)
  657. cmpwi r5,0
  658. beq 9f
  659. mtctr r5
  660. addi r6,r4,VCPU_SLB
  661. 1: ld r8,VCPU_SLB_E(r6)
  662. ld r9,VCPU_SLB_V(r6)
  663. slbmte r9,r8
  664. addi r6,r6,VCPU_SLB_SIZE
  665. bdnz 1b
  666. 9:
  667. /* Increment yield count if they have a VPA */
  668. ld r3, VCPU_VPA(r4)
  669. cmpdi r3, 0
  670. beq 25f
  671. li r6, LPPACA_YIELDCOUNT
  672. LWZX_BE r5, r3, r6
  673. addi r5, r5, 1
  674. STWX_BE r5, r3, r6
  675. li r6, 1
  676. stb r6, VCPU_VPA_DIRTY(r4)
  677. 25:
  678. /* Save purr/spurr */
  679. mfspr r5,SPRN_PURR
  680. mfspr r6,SPRN_SPURR
  681. std r5,HSTATE_PURR(r13)
  682. std r6,HSTATE_SPURR(r13)
  683. ld r7,VCPU_PURR(r4)
  684. ld r8,VCPU_SPURR(r4)
  685. mtspr SPRN_PURR,r7
  686. mtspr SPRN_SPURR,r8
  687. /* Save host values of some registers */
  688. BEGIN_FTR_SECTION
  689. mfspr r5, SPRN_TIDR
  690. mfspr r6, SPRN_PSSCR
  691. mfspr r7, SPRN_PID
  692. mfspr r8, SPRN_IAMR
  693. std r5, STACK_SLOT_TID(r1)
  694. std r6, STACK_SLOT_PSSCR(r1)
  695. std r7, STACK_SLOT_PID(r1)
  696. std r8, STACK_SLOT_IAMR(r1)
  697. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  698. BEGIN_FTR_SECTION
  699. mfspr r5, SPRN_CIABR
  700. mfspr r6, SPRN_DAWR
  701. mfspr r7, SPRN_DAWRX
  702. std r5, STACK_SLOT_CIABR(r1)
  703. std r6, STACK_SLOT_DAWR(r1)
  704. std r7, STACK_SLOT_DAWRX(r1)
  705. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  706. BEGIN_FTR_SECTION
  707. /* Set partition DABR */
  708. /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
  709. lwz r5,VCPU_DABRX(r4)
  710. ld r6,VCPU_DABR(r4)
  711. mtspr SPRN_DABRX,r5
  712. mtspr SPRN_DABR,r6
  713. isync
  714. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  715. #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  716. BEGIN_FTR_SECTION
  717. bl kvmppc_restore_tm
  718. END_FTR_SECTION_IFSET(CPU_FTR_TM)
  719. #endif
  720. /* Load guest PMU registers */
  721. /* R4 is live here (vcpu pointer) */
  722. li r3, 1
  723. sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
  724. mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
  725. isync
  726. BEGIN_FTR_SECTION
  727. ld r3, VCPU_MMCR(r4)
  728. andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
  729. cmpwi r5, MMCR0_PMAO
  730. beql kvmppc_fix_pmao
  731. END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
  732. lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
  733. lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
  734. lwz r6, VCPU_PMC + 8(r4)
  735. lwz r7, VCPU_PMC + 12(r4)
  736. lwz r8, VCPU_PMC + 16(r4)
  737. lwz r9, VCPU_PMC + 20(r4)
  738. mtspr SPRN_PMC1, r3
  739. mtspr SPRN_PMC2, r5
  740. mtspr SPRN_PMC3, r6
  741. mtspr SPRN_PMC4, r7
  742. mtspr SPRN_PMC5, r8
  743. mtspr SPRN_PMC6, r9
  744. ld r3, VCPU_MMCR(r4)
  745. ld r5, VCPU_MMCR + 8(r4)
  746. ld r6, VCPU_MMCR + 16(r4)
  747. ld r7, VCPU_SIAR(r4)
  748. ld r8, VCPU_SDAR(r4)
  749. mtspr SPRN_MMCR1, r5
  750. mtspr SPRN_MMCRA, r6
  751. mtspr SPRN_SIAR, r7
  752. mtspr SPRN_SDAR, r8
  753. BEGIN_FTR_SECTION
  754. ld r5, VCPU_MMCR + 24(r4)
  755. ld r6, VCPU_SIER(r4)
  756. mtspr SPRN_MMCR2, r5
  757. mtspr SPRN_SIER, r6
  758. BEGIN_FTR_SECTION_NESTED(96)
  759. lwz r7, VCPU_PMC + 24(r4)
  760. lwz r8, VCPU_PMC + 28(r4)
  761. ld r9, VCPU_MMCR + 32(r4)
  762. mtspr SPRN_SPMC1, r7
  763. mtspr SPRN_SPMC2, r8
  764. mtspr SPRN_MMCRS, r9
  765. END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
  766. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  767. mtspr SPRN_MMCR0, r3
  768. isync
  769. /* Load up FP, VMX and VSX registers */
  770. bl kvmppc_load_fp
  771. ld r14, VCPU_GPR(R14)(r4)
  772. ld r15, VCPU_GPR(R15)(r4)
  773. ld r16, VCPU_GPR(R16)(r4)
  774. ld r17, VCPU_GPR(R17)(r4)
  775. ld r18, VCPU_GPR(R18)(r4)
  776. ld r19, VCPU_GPR(R19)(r4)
  777. ld r20, VCPU_GPR(R20)(r4)
  778. ld r21, VCPU_GPR(R21)(r4)
  779. ld r22, VCPU_GPR(R22)(r4)
  780. ld r23, VCPU_GPR(R23)(r4)
  781. ld r24, VCPU_GPR(R24)(r4)
  782. ld r25, VCPU_GPR(R25)(r4)
  783. ld r26, VCPU_GPR(R26)(r4)
  784. ld r27, VCPU_GPR(R27)(r4)
  785. ld r28, VCPU_GPR(R28)(r4)
  786. ld r29, VCPU_GPR(R29)(r4)
  787. ld r30, VCPU_GPR(R30)(r4)
  788. ld r31, VCPU_GPR(R31)(r4)
  789. /* Switch DSCR to guest value */
  790. ld r5, VCPU_DSCR(r4)
  791. mtspr SPRN_DSCR, r5
  792. BEGIN_FTR_SECTION
  793. /* Skip next section on POWER7 */
  794. b 8f
  795. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  796. /* Load up POWER8-specific registers */
  797. ld r5, VCPU_IAMR(r4)
  798. lwz r6, VCPU_PSPB(r4)
  799. ld r7, VCPU_FSCR(r4)
  800. mtspr SPRN_IAMR, r5
  801. mtspr SPRN_PSPB, r6
  802. mtspr SPRN_FSCR, r7
  803. ld r5, VCPU_DAWR(r4)
  804. ld r6, VCPU_DAWRX(r4)
  805. ld r7, VCPU_CIABR(r4)
  806. ld r8, VCPU_TAR(r4)
  807. mtspr SPRN_DAWR, r5
  808. mtspr SPRN_DAWRX, r6
  809. mtspr SPRN_CIABR, r7
  810. mtspr SPRN_TAR, r8
  811. ld r5, VCPU_IC(r4)
  812. ld r8, VCPU_EBBHR(r4)
  813. mtspr SPRN_IC, r5
  814. mtspr SPRN_EBBHR, r8
  815. ld r5, VCPU_EBBRR(r4)
  816. ld r6, VCPU_BESCR(r4)
  817. lwz r7, VCPU_GUEST_PID(r4)
  818. ld r8, VCPU_WORT(r4)
  819. mtspr SPRN_EBBRR, r5
  820. mtspr SPRN_BESCR, r6
  821. mtspr SPRN_PID, r7
  822. mtspr SPRN_WORT, r8
  823. BEGIN_FTR_SECTION
  824. PPC_INVALIDATE_ERAT
  825. END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
  826. BEGIN_FTR_SECTION
  827. /* POWER8-only registers */
  828. ld r5, VCPU_TCSCR(r4)
  829. ld r6, VCPU_ACOP(r4)
  830. ld r7, VCPU_CSIGR(r4)
  831. ld r8, VCPU_TACR(r4)
  832. mtspr SPRN_TCSCR, r5
  833. mtspr SPRN_ACOP, r6
  834. mtspr SPRN_CSIGR, r7
  835. mtspr SPRN_TACR, r8
  836. FTR_SECTION_ELSE
  837. /* POWER9-only registers */
  838. ld r5, VCPU_TID(r4)
  839. ld r6, VCPU_PSSCR(r4)
  840. oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
  841. mtspr SPRN_TIDR, r5
  842. mtspr SPRN_PSSCR, r6
  843. ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
  844. 8:
  845. /*
  846. * Set the decrementer to the guest decrementer.
  847. */
  848. ld r8,VCPU_DEC_EXPIRES(r4)
  849. /* r8 is a host timebase value here, convert to guest TB */
  850. ld r5,HSTATE_KVM_VCORE(r13)
  851. ld r6,VCORE_TB_OFFSET(r5)
  852. add r8,r8,r6
  853. mftb r7
  854. subf r3,r7,r8
  855. mtspr SPRN_DEC,r3
  856. std r3,VCPU_DEC(r4)
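/*
 * i.e. DEC = vcpu->arch.dec_expires (converted to guest timebase)
 * minus the current timebase, so the guest decrementer expires at the
 * point the guest expects.
 */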
  857. ld r5, VCPU_SPRG0(r4)
  858. ld r6, VCPU_SPRG1(r4)
  859. ld r7, VCPU_SPRG2(r4)
  860. ld r8, VCPU_SPRG3(r4)
  861. mtspr SPRN_SPRG0, r5
  862. mtspr SPRN_SPRG1, r6
  863. mtspr SPRN_SPRG2, r7
  864. mtspr SPRN_SPRG3, r8
  865. /* Load up DAR and DSISR */
  866. ld r5, VCPU_DAR(r4)
  867. lwz r6, VCPU_DSISR(r4)
  868. mtspr SPRN_DAR, r5
  869. mtspr SPRN_DSISR, r6
  870. /* Restore AMR and UAMOR, set AMOR to all 1s */
  871. ld r5,VCPU_AMR(r4)
  872. ld r6,VCPU_UAMOR(r4)
  873. li r7,-1
  874. mtspr SPRN_AMR,r5
  875. mtspr SPRN_UAMOR,r6
  876. mtspr SPRN_AMOR,r7
  877. /* Restore state of CTRL run bit; assume 1 on entry */
  878. lwz r5,VCPU_CTRL(r4)
  879. andi. r5,r5,1
  880. bne 4f
  881. mfspr r6,SPRN_CTRLF
  882. clrrdi r6,r6,1
  883. mtspr SPRN_CTRLT,r6
  884. 4:
  885. /* Secondary threads wait for primary to have done partition switch */
  886. ld r5, HSTATE_KVM_VCORE(r13)
  887. lbz r6, HSTATE_PTID(r13)
  888. cmpwi r6, 0
  889. beq 21f
  890. lbz r0, VCORE_IN_GUEST(r5)
  891. cmpwi r0, 0
  892. bne 21f
  893. HMT_LOW
  894. 20: lwz r3, VCORE_ENTRY_EXIT(r5)
  895. cmpwi r3, 0x100
  896. bge no_switch_exit
  897. lbz r0, VCORE_IN_GUEST(r5)
  898. cmpwi r0, 0
  899. beq 20b
  900. HMT_MEDIUM
  901. 21:
  902. /* Set LPCR. */
  903. ld r8,VCORE_LPCR(r5)
  904. mtspr SPRN_LPCR,r8
  905. isync
  906. /* Check if HDEC expires soon */
  907. mfspr r3, SPRN_HDEC
  908. EXTEND_HDEC(r3)
  909. cmpdi r3, 512 /* 1 microsecond */
  910. blt hdec_soon
  911. #ifdef CONFIG_KVM_XICS
  912. /* We are entering the guest on that thread, push VCPU to XIVE */
  913. ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
  914. cmpldi cr0, r10, 0
  915. beq no_xive
  916. ld r11, VCPU_XIVE_SAVED_STATE(r4)
  917. li r9, TM_QW1_OS
  918. stdcix r11,r9,r10
  919. eieio
  920. lwz r11, VCPU_XIVE_CAM_WORD(r4)
  921. li r9, TM_QW1_OS + TM_WORD2
  922. stwcix r11,r9,r10
  923. li r9, 1
  924. stw r9, VCPU_XIVE_PUSHED(r4)
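/*
 * The stores above are cache-inhibited real-mode stores into the XIVE
 * thread interrupt management area: first the saved OS context word,
 * then the CAM word identifying this vcpu.  VCPU_XIVE_PUSHED is then
 * set so the exit path knows it must pull the context back out.
 */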
  925. no_xive:
  926. #endif /* CONFIG_KVM_XICS */
  927. deliver_guest_interrupt:
  928. ld r6, VCPU_CTR(r4)
  929. ld r7, VCPU_XER(r4)
  930. mtctr r6
  931. mtxer r7
  932. kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
  933. ld r10, VCPU_PC(r4)
  934. ld r11, VCPU_MSR(r4)
  935. ld r6, VCPU_SRR0(r4)
  936. ld r7, VCPU_SRR1(r4)
  937. mtspr SPRN_SRR0, r6
  938. mtspr SPRN_SRR1, r7
  939. /* r11 = vcpu->arch.msr & ~MSR_HV */
  940. rldicl r11, r11, 63 - MSR_HV_LG, 1
  941. rotldi r11, r11, 1 + MSR_HV_LG
  942. ori r11, r11, MSR_ME
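/*
 * The rldicl/rotldi pair above rotates the HV bit up to the MSB, drops
 * it with the rotate-and-clear mask, and rotates back, i.e.
 *	r11 = (vcpu->arch.msr & ~MSR_HV) | MSR_ME
 * as the comment above states.
 */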
  943. /* Check if we can deliver an external or decrementer interrupt now */
  944. ld r0, VCPU_PENDING_EXC(r4)
  945. rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
  946. cmpdi cr1, r0, 0
  947. andi. r8, r11, MSR_EE
  948. mfspr r8, SPRN_LPCR
  949. /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
  950. rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
  951. mtspr SPRN_LPCR, r8
  952. isync
  953. beq 5f
  954. li r0, BOOK3S_INTERRUPT_EXTERNAL
  955. bne cr1, 12f
  956. mfspr r0, SPRN_DEC
  957. BEGIN_FTR_SECTION
  958. /* On POWER9 check whether the guest has large decrementer enabled */
  959. andis. r8, r8, LPCR_LD@h
  960. bne 15f
  961. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  962. extsw r0, r0
  963. 15: cmpdi r0, 0
  964. li r0, BOOK3S_INTERRUPT_DECREMENTER
  965. bge 5f
  966. 12: mtspr SPRN_SRR0, r10
  967. mr r10,r0
  968. mtspr SPRN_SRR1, r11
  969. mr r9, r4
  970. bl kvmppc_msr_interrupt
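/*
 * To summarise the block above: the pending-external bit is mirrored
 * into LPCR[MER], and if the guest MSR has EE set, a pending external
 * interrupt or an expired decrementer is delivered now by saving the
 * guest PC/MSR in SRR0/SRR1, pointing entry at the interrupt vector,
 * and computing the interrupt-time MSR via kvmppc_msr_interrupt.
 */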
  971. 5:
  972. /*
  973. * Required state:
  974. * R4 = vcpu
  975. * R10: value for HSRR0
  976. * R11: value for HSRR1
  977. * R13 = PACA
  978. */
  979. fast_guest_return:
  980. li r0,0
  981. stb r0,VCPU_CEDED(r4) /* cancel cede */
  982. mtspr SPRN_HSRR0,r10
  983. mtspr SPRN_HSRR1,r11
  984. /* Activate guest mode, so faults get handled by KVM */
  985. li r9, KVM_GUEST_MODE_GUEST_HV
  986. stb r9, HSTATE_IN_GUEST(r13)
  987. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  988. /* Accumulate timing */
  989. addi r3, r4, VCPU_TB_GUEST
  990. bl kvmhv_accumulate_time
  991. #endif
  992. /* Enter guest */
  993. BEGIN_FTR_SECTION
  994. ld r5, VCPU_CFAR(r4)
  995. mtspr SPRN_CFAR, r5
  996. END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
  997. BEGIN_FTR_SECTION
  998. ld r0, VCPU_PPR(r4)
  999. END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  1000. ld r5, VCPU_LR(r4)
  1001. lwz r6, VCPU_CR(r4)
  1002. mtlr r5
  1003. mtcr r6
  1004. ld r1, VCPU_GPR(R1)(r4)
  1005. ld r2, VCPU_GPR(R2)(r4)
  1006. ld r3, VCPU_GPR(R3)(r4)
  1007. ld r5, VCPU_GPR(R5)(r4)
  1008. ld r6, VCPU_GPR(R6)(r4)
  1009. ld r7, VCPU_GPR(R7)(r4)
  1010. ld r8, VCPU_GPR(R8)(r4)
  1011. ld r9, VCPU_GPR(R9)(r4)
  1012. ld r10, VCPU_GPR(R10)(r4)
  1013. ld r11, VCPU_GPR(R11)(r4)
  1014. ld r12, VCPU_GPR(R12)(r4)
  1015. ld r13, VCPU_GPR(R13)(r4)
  1016. BEGIN_FTR_SECTION
  1017. mtspr SPRN_PPR, r0
  1018. END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  1019. ld r0, VCPU_GPR(R0)(r4)
  1020. ld r4, VCPU_GPR(R4)(r4)
  1021. hrfid
  1022. b .
  1023. secondary_too_late:
  1024. li r12, 0
  1025. cmpdi r4, 0
  1026. beq 11f
  1027. stw r12, VCPU_TRAP(r4)
  1028. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  1029. addi r3, r4, VCPU_TB_RMEXIT
  1030. bl kvmhv_accumulate_time
  1031. #endif
  1032. 11: b kvmhv_switch_to_host
  1033. no_switch_exit:
  1034. HMT_MEDIUM
  1035. li r12, 0
  1036. b 12f
  1037. hdec_soon:
  1038. li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
  1039. 12: stw r12, VCPU_TRAP(r4)
  1040. mr r9, r4
  1041. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  1042. addi r3, r4, VCPU_TB_RMEXIT
  1043. bl kvmhv_accumulate_time
  1044. #endif
  1045. b guest_exit_cont
  1046. /******************************************************************************
  1047. * *
  1048. * Exit code *
  1049. * *
  1050. *****************************************************************************/
  1051. /*
  1052. * We come here from the first-level interrupt handlers.
  1053. */
  1054. .globl kvmppc_interrupt_hv
  1055. kvmppc_interrupt_hv:
  1056. /*
  1057. * Register contents:
  1058. * R12 = (guest CR << 32) | interrupt vector
  1059. * R13 = PACA
  1060. * guest R12 saved in shadow VCPU SCRATCH0
  1061. * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
  1062. * guest R13 saved in SPRN_SCRATCH0
  1063. */
  1064. std r9, HSTATE_SCRATCH2(r13)
  1065. lbz r9, HSTATE_IN_GUEST(r13)
  1066. cmpwi r9, KVM_GUEST_MODE_HOST_HV
  1067. beq kvmppc_bad_host_intr
  1068. #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
  1069. cmpwi r9, KVM_GUEST_MODE_GUEST
  1070. ld r9, HSTATE_SCRATCH2(r13)
  1071. beq kvmppc_interrupt_pr
  1072. #endif
  1073. /* We're now back in the host but in guest MMU context */
  1074. li r9, KVM_GUEST_MODE_HOST_HV
  1075. stb r9, HSTATE_IN_GUEST(r13)
  1076. ld r9, HSTATE_KVM_VCPU(r13)
  1077. /* Save registers */
  1078. std r0, VCPU_GPR(R0)(r9)
  1079. std r1, VCPU_GPR(R1)(r9)
  1080. std r2, VCPU_GPR(R2)(r9)
  1081. std r3, VCPU_GPR(R3)(r9)
  1082. std r4, VCPU_GPR(R4)(r9)
  1083. std r5, VCPU_GPR(R5)(r9)
  1084. std r6, VCPU_GPR(R6)(r9)
  1085. std r7, VCPU_GPR(R7)(r9)
  1086. std r8, VCPU_GPR(R8)(r9)
  1087. ld r0, HSTATE_SCRATCH2(r13)
  1088. std r0, VCPU_GPR(R9)(r9)
  1089. std r10, VCPU_GPR(R10)(r9)
  1090. std r11, VCPU_GPR(R11)(r9)
  1091. ld r3, HSTATE_SCRATCH0(r13)
  1092. std r3, VCPU_GPR(R12)(r9)
  1093. /* CR is in the high half of r12 */
  1094. srdi r4, r12, 32
  1095. stw r4, VCPU_CR(r9)
  1096. BEGIN_FTR_SECTION
  1097. ld r3, HSTATE_CFAR(r13)
  1098. std r3, VCPU_CFAR(r9)
  1099. END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
  1100. BEGIN_FTR_SECTION
  1101. ld r4, HSTATE_PPR(r13)
  1102. std r4, VCPU_PPR(r9)
  1103. END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  1104. /* Restore R1/R2 so we can handle faults */
  1105. ld r1, HSTATE_HOST_R1(r13)
  1106. ld r2, PACATOC(r13)
  1107. mfspr r10, SPRN_SRR0
  1108. mfspr r11, SPRN_SRR1
  1109. std r10, VCPU_SRR0(r9)
  1110. std r11, VCPU_SRR1(r9)
  1111. /* trap is in the low half of r12, clear CR from the high half */
  1112. clrldi r12, r12, 32
  1113. andi. r0, r12, 2 /* need to read HSRR0/1? */
  1114. beq 1f
  1115. mfspr r10, SPRN_HSRR0
  1116. mfspr r11, SPRN_HSRR1
  1117. clrrdi r12, r12, 2
  1118. 1: std r10, VCPU_PC(r9)
  1119. std r11, VCPU_MSR(r9)
  1120. GET_SCRATCH0(r3)
  1121. mflr r4
  1122. std r3, VCPU_GPR(R13)(r9)
  1123. std r4, VCPU_LR(r9)
  1124. stw r12,VCPU_TRAP(r9)
  1125. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  1126. addi r3, r9, VCPU_TB_RMINTR
  1127. mr r4, r9
  1128. bl kvmhv_accumulate_time
  1129. ld r5, VCPU_GPR(R5)(r9)
  1130. ld r6, VCPU_GPR(R6)(r9)
  1131. ld r7, VCPU_GPR(R7)(r9)
  1132. ld r8, VCPU_GPR(R8)(r9)
  1133. #endif
  1134. /* Save HEIR (HV emulation assist reg) in emul_inst
  1135. if this is an HEI (HV emulation interrupt, e40) */
  1136. li r3,KVM_INST_FETCH_FAILED
  1137. stw r3,VCPU_LAST_INST(r9)
  1138. cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
  1139. bne 11f
  1140. mfspr r3,SPRN_HEIR
  1141. 11: stw r3,VCPU_HEIR(r9)
  1142. /* these are volatile across C function calls */
  1143. #ifdef CONFIG_RELOCATABLE
  1144. ld r3, HSTATE_SCRATCH1(r13)
  1145. mtctr r3
  1146. #else
  1147. mfctr r3
  1148. #endif
  1149. mfxer r4
  1150. std r3, VCPU_CTR(r9)
  1151. std r4, VCPU_XER(r9)
  1152. /* If this is a page table miss then see if it's theirs or ours */
  1153. cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
  1154. beq kvmppc_hdsi
  1155. cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
  1156. beq kvmppc_hisi
  1157. /* See if this is a leftover HDEC interrupt */
  1158. cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
  1159. bne 2f
  1160. mfspr r3,SPRN_HDEC
  1161. cmpwi r3,0
  1162. mr r4,r9
  1163. bge fast_guest_return
  1164. 2:
  1165. /* See if this is an hcall we can handle in real mode */
  1166. cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
  1167. beq hcall_try_real_mode
  1168. /* Hypervisor doorbell - exit only if host IPI flag set */
  1169. cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
  1170. bne 3f
  1171. lbz r0, HSTATE_HOST_IPI(r13)
  1172. cmpwi r0, 0
  1173. beq 4f
  1174. b guest_exit_cont
  1175. 3:
  1176. /* External interrupt ? */
  1177. cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
  1178. bne+ guest_exit_cont
  1179. /* External interrupt, first check for host_ipi. If this is
  1180. * set, we know the host wants us out so let's do it now
  1181. */
  1182. bl kvmppc_read_intr
  1183. /*
  1184. * Restore the active volatile registers after returning from
  1185. * a C function.
  1186. */
  1187. ld r9, HSTATE_KVM_VCPU(r13)
  1188. li r12, BOOK3S_INTERRUPT_EXTERNAL
  1189. /*
  1190. * kvmppc_read_intr return codes:
  1191. *
  1192. * Exit to host (r3 > 0)
  1193. * 1 An interrupt is pending that needs to be handled by the host
  1194. * Exit guest and return to host by branching to guest_exit_cont
  1195. *
  1196. * 2 Passthrough that needs completion in the host
  1197. * Exit guest and return to host by branching to guest_exit_cont
  1198. * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
  1199. * to indicate to the host to complete handling the interrupt
  1200. *
  1201. * Before returning to the guest, we check if any CPU is heading out
  1202. * to the host and, if so, we head out also. If no CPUs are heading
  1203. * out, we check the return values <= 0 below.
  1204. *
  1205. * Return to guest (r3 <= 0)
  1206. * 0 No external interrupt is pending
  1207. * -1 A guest wakeup IPI (which has now been cleared)
  1208. * In either case, we return to guest to deliver any pending
  1209. * guest interrupts.
  1210. *
  1211. * -2 A PCI passthrough external interrupt was handled
  1212. * (interrupt was delivered directly to guest)
  1213. * Return to guest to deliver any pending guest interrupts.
  1214. */
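/*
 * In outline, the dispatch below is:
 *	if (r3 == 2) { trap = BOOK3S_INTERRUPT_HV_RM_HARD; goto guest_exit_cont; }
 *	if (r3 > 0)  goto guest_exit_cont;
 *	if (vcore->entry_exit_map >= 0x100) goto guest_exit_cont;
 *	goto deliver_guest_interrupt;
 */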
  1215. cmpdi r3, 1
  1216. ble 1f
  1217. /* Return code = 2 */
  1218. li r12, BOOK3S_INTERRUPT_HV_RM_HARD
  1219. stw r12, VCPU_TRAP(r9)
  1220. b guest_exit_cont
  1221. 1: /* Return code <= 1 */
  1222. cmpdi r3, 0
  1223. bgt guest_exit_cont
  1224. /* Return code <= 0 */
  1225. 4: ld r5, HSTATE_KVM_VCORE(r13)
  1226. lwz r0, VCORE_ENTRY_EXIT(r5)
  1227. cmpwi r0, 0x100
  1228. mr r4, r9
  1229. blt deliver_guest_interrupt
  1230. guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
  1231. #ifdef CONFIG_KVM_XICS
  1232. /* We are exiting, pull the VP from the XIVE */
  1233. lwz r0, VCPU_XIVE_PUSHED(r9)
  1234. cmpwi cr0, r0, 0
  1235. beq 1f
  1236. li r7, TM_SPC_PULL_OS_CTX
  1237. li r6, TM_QW1_OS
  1238. mfmsr r0
  1239. andi. r0, r0, MSR_IR /* in real mode? */
  1240. beq 2f
  1241. ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
  1242. cmpldi cr0, r10, 0
  1243. beq 1f
  1244. /* First load to pull the context, we ignore the value */
  1245. lwzx r11, r7, r10
  1246. eieio
  1247. /* Second load to recover the context state (Words 0 and 1) */
  1248. ldx r11, r6, r10
  1249. b 3f
  1250. 2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
  1251. cmpldi cr0, r10, 0
  1252. beq 1f
  1253. /* First load to pull the context, we ignore the value */
  1254. lwzcix r11, r7, r10
  1255. eieio
  1256. /* Second load to recover the context state (Words 0 and 1) */
  1257. ldcix r11, r6, r10
  1258. 3: std r11, VCPU_XIVE_SAVED_STATE(r9)
  1259. /* Fixup some of the state for the next load */
  1260. li r10, 0
  1261. li r0, 0xff
  1262. stw r10, VCPU_XIVE_PUSHED(r9)
  1263. stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
  1264. stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
  1265. 1:
  1266. #endif /* CONFIG_KVM_XICS */
  1267. /* Save more register state */
  1268. mfdar r6
  1269. mfdsisr r7
  1270. std r6, VCPU_DAR(r9)
  1271. stw r7, VCPU_DSISR(r9)
  1272. /* don't overwrite fault_dar/fault_dsisr if HDSI */
  1273. cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
  1274. beq mc_cont
  1275. std r6, VCPU_FAULT_DAR(r9)
  1276. stw r7, VCPU_FAULT_DSISR(r9)
  1277. /* See if it is a machine check */
  1278. cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
  1279. beq machine_check_realmode
  1280. mc_cont:
  1281. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  1282. addi r3, r9, VCPU_TB_RMEXIT
  1283. mr r4, r9
  1284. bl kvmhv_accumulate_time
  1285. #endif
  1286. mr r3, r12
  1287. /* Increment exit count, poke other threads to exit */
  1288. bl kvmhv_commence_exit
  1289. nop
  1290. ld r9, HSTATE_KVM_VCPU(r13)
  1291. lwz r12, VCPU_TRAP(r9)
  1292. /* Stop others sending VCPU interrupts to this physical CPU */
  1293. li r0, -1
  1294. stw r0, VCPU_CPU(r9)
  1295. stw r0, VCPU_THREAD_CPU(r9)
  1296. /* Save guest CTRL register, set runlatch to 1 */
  1297. mfspr r6,SPRN_CTRLF
  1298. stw r6,VCPU_CTRL(r9)
  1299. andi. r0,r6,1
  1300. bne 4f
  1301. ori r6,r6,1
  1302. mtspr SPRN_CTRLT,r6
  1303. 4:
  1304. /* Read the guest SLB and save it away */
  1305. ld r5, VCPU_KVM(r9)
  1306. lbz r0, KVM_RADIX(r5)
  1307. cmpwi r0, 0
  1308. li r5, 0
  1309. bne 3f /* for radix, save 0 entries */
  1310. lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
  1311. mtctr r0
  1312. li r6,0
  1313. addi r7,r9,VCPU_SLB
  1314. 1: slbmfee r8,r6
  1315. andis. r0,r8,SLB_ESID_V@h
  1316. beq 2f
  1317. add r8,r8,r6 /* put index in */
  1318. slbmfev r3,r6
  1319. std r8,VCPU_SLB_E(r7)
  1320. std r3,VCPU_SLB_V(r7)
  1321. addi r7,r7,VCPU_SLB_SIZE
  1322. addi r5,r5,1
  1323. 2: addi r6,r6,1
  1324. bdnz 1b
  1325. 3: stw r5,VCPU_SLB_MAX(r9)
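/*
 * i.e. read back each hardware SLB entry, keep only the valid ones
 * (with the index folded into the ESID word), and record how many were
 * saved in vcpu->arch.slb_max.  For a radix guest nothing is saved and
 * slb_max is set to 0.
 */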
  1326. /*
  1327. * Save the guest PURR/SPURR
  1328. */
  1329. mfspr r5,SPRN_PURR
  1330. mfspr r6,SPRN_SPURR
  1331. ld r7,VCPU_PURR(r9)
  1332. ld r8,VCPU_SPURR(r9)
  1333. std r5,VCPU_PURR(r9)
  1334. std r6,VCPU_SPURR(r9)
  1335. subf r5,r7,r5
  1336. subf r6,r8,r6
  1337. /*
  1338. * Restore host PURR/SPURR and add guest times
  1339. * so that the time in the guest gets accounted.
  1340. */
  1341. ld r3,HSTATE_PURR(r13)
  1342. ld r4,HSTATE_SPURR(r13)
  1343. add r3,r3,r5
  1344. add r4,r4,r6
  1345. mtspr SPRN_PURR,r3
  1346. mtspr SPRN_SPURR,r4
  1347. /* Save DEC */
  1348. ld r3, HSTATE_KVM_VCORE(r13)
  1349. mfspr r5,SPRN_DEC
  1350. mftb r6
  1351. /* On P9, if the guest has large decr enabled, don't sign extend */
  1352. BEGIN_FTR_SECTION
  1353. ld r4, VCORE_LPCR(r3)
  1354. andis. r4, r4, LPCR_LD@h
  1355. bne 16f
  1356. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  1357. extsw r5,r5
  1358. 16: add r5,r5,r6
  1359. /* r5 is a guest timebase value here, convert to host TB */
  1360. ld r4,VCORE_TB_OFFSET(r3)
  1361. subf r5,r4,r5
  1362. std r5,VCPU_DEC_EXPIRES(r9)
  1363. BEGIN_FTR_SECTION
  1364. b 8f
  1365. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  1366. /* Save POWER8-specific registers */
  1367. mfspr r5, SPRN_IAMR
  1368. mfspr r6, SPRN_PSPB
  1369. mfspr r7, SPRN_FSCR
  1370. std r5, VCPU_IAMR(r9)
  1371. stw r6, VCPU_PSPB(r9)
  1372. std r7, VCPU_FSCR(r9)
  1373. mfspr r5, SPRN_IC
  1374. mfspr r7, SPRN_TAR
  1375. std r5, VCPU_IC(r9)
  1376. std r7, VCPU_TAR(r9)
  1377. mfspr r8, SPRN_EBBHR
  1378. std r8, VCPU_EBBHR(r9)
  1379. mfspr r5, SPRN_EBBRR
  1380. mfspr r6, SPRN_BESCR
  1381. mfspr r7, SPRN_PID
  1382. mfspr r8, SPRN_WORT
  1383. std r5, VCPU_EBBRR(r9)
  1384. std r6, VCPU_BESCR(r9)
  1385. stw r7, VCPU_GUEST_PID(r9)
  1386. std r8, VCPU_WORT(r9)
  1387. BEGIN_FTR_SECTION
  1388. mfspr r5, SPRN_TCSCR
  1389. mfspr r6, SPRN_ACOP
  1390. mfspr r7, SPRN_CSIGR
  1391. mfspr r8, SPRN_TACR
  1392. std r5, VCPU_TCSCR(r9)
  1393. std r6, VCPU_ACOP(r9)
  1394. std r7, VCPU_CSIGR(r9)
  1395. std r8, VCPU_TACR(r9)
  1396. FTR_SECTION_ELSE
  1397. mfspr r5, SPRN_TIDR
  1398. mfspr r6, SPRN_PSSCR
  1399. std r5, VCPU_TID(r9)
  1400. rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
  1401. rotldi r6, r6, 60
  1402. std r6, VCPU_PSSCR(r9)
  1403. ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
  1404. /*
  1405. * Restore various registers to 0, where non-zero values
  1406. * set by the guest could disrupt the host.
  1407. */
  1408. li r0, 0
  1409. mtspr SPRN_PSPB, r0
  1410. mtspr SPRN_WORT, r0
  1411. BEGIN_FTR_SECTION
  1412. mtspr SPRN_IAMR, r0
  1413. mtspr SPRN_TCSCR, r0
  1414. /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
  1415. li r0, 1
  1416. sldi r0, r0, 31
  1417. mtspr SPRN_MMCRS, r0
  1418. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
  1419. 8:
  1420. /* Save and reset AMR and UAMOR before turning on the MMU */
  1421. mfspr r5,SPRN_AMR
  1422. mfspr r6,SPRN_UAMOR
  1423. std r5,VCPU_AMR(r9)
  1424. std r6,VCPU_UAMOR(r9)
  1425. li r6,0
  1426. mtspr SPRN_AMR,r6
  1427. mtspr SPRN_UAMOR, r6
  1428. /* Switch DSCR back to host value */
  1429. mfspr r8, SPRN_DSCR
  1430. ld r7, HSTATE_DSCR(r13)
  1431. std r8, VCPU_DSCR(r9)
  1432. mtspr SPRN_DSCR, r7
  1433. /* Save non-volatile GPRs */
  1434. std r14, VCPU_GPR(R14)(r9)
  1435. std r15, VCPU_GPR(R15)(r9)
  1436. std r16, VCPU_GPR(R16)(r9)
  1437. std r17, VCPU_GPR(R17)(r9)
  1438. std r18, VCPU_GPR(R18)(r9)
  1439. std r19, VCPU_GPR(R19)(r9)
  1440. std r20, VCPU_GPR(R20)(r9)
  1441. std r21, VCPU_GPR(R21)(r9)
  1442. std r22, VCPU_GPR(R22)(r9)
  1443. std r23, VCPU_GPR(R23)(r9)
  1444. std r24, VCPU_GPR(R24)(r9)
  1445. std r25, VCPU_GPR(R25)(r9)
  1446. std r26, VCPU_GPR(R26)(r9)
  1447. std r27, VCPU_GPR(R27)(r9)
  1448. std r28, VCPU_GPR(R28)(r9)
  1449. std r29, VCPU_GPR(R29)(r9)
  1450. std r30, VCPU_GPR(R30)(r9)
  1451. std r31, VCPU_GPR(R31)(r9)
  1452. /* Save SPRGs */
  1453. mfspr r3, SPRN_SPRG0
  1454. mfspr r4, SPRN_SPRG1
  1455. mfspr r5, SPRN_SPRG2
  1456. mfspr r6, SPRN_SPRG3
  1457. std r3, VCPU_SPRG0(r9)
  1458. std r4, VCPU_SPRG1(r9)
  1459. std r5, VCPU_SPRG2(r9)
  1460. std r6, VCPU_SPRG3(r9)
  1461. /* save FP state */
  1462. mr r3, r9
  1463. bl kvmppc_save_fp
  1464. #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  1465. BEGIN_FTR_SECTION
  1466. bl kvmppc_save_tm
  1467. END_FTR_SECTION_IFSET(CPU_FTR_TM)
  1468. #endif
  1469. /* Increment yield count if they have a VPA */
  1470. ld r8, VCPU_VPA(r9) /* do they have a VPA? */
  1471. cmpdi r8, 0
  1472. beq 25f
  1473. li r4, LPPACA_YIELDCOUNT
  1474. LWZX_BE r3, r8, r4
  1475. addi r3, r3, 1
  1476. STWX_BE r3, r8, r4
  1477. li r3, 1
  1478. stb r3, VCPU_VPA_DIRTY(r9)
  1479. 25:
  1480. /* Save PMU registers if requested */
  1481. /* r8 and cr0.eq are live here */
  1482. BEGIN_FTR_SECTION
  1483. /*
  1484. * POWER8 seems to have a hardware bug where setting
  1485. * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
  1486. * when some counters are already negative doesn't seem
  1487. * to cause a performance monitor alert (and hence interrupt).
  1488. * The effect of this is that when saving the PMU state,
  1489. * if there is no PMU alert pending when we read MMCR0
  1490. * before freezing the counters, but one becomes pending
  1491. * before we read the counters, we lose it.
  1492. * To work around this, we need a way to freeze the counters
  1493. * before reading MMCR0. Normally, freezing the counters
  1494. * is done by writing MMCR0 (to set MMCR0[FC]) which
1495. * unavoidably writes MMCR0[PMAO] as well. On POWER8,
  1496. * we can also freeze the counters using MMCR2, by writing
  1497. * 1s to all the counter freeze condition bits (there are
  1498. * 9 bits each for 6 counters).
  1499. */
  1500. li r3, -1 /* set all freeze bits */
  1501. clrrdi r3, r3, 10
  1502. mfspr r10, SPRN_MMCR2
  1503. mtspr SPRN_MMCR2, r3
  1504. isync
  1505. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  1506. li r3, 1
  1507. sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
  1508. mfspr r4, SPRN_MMCR0 /* save MMCR0 */
  1509. mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
  1510. mfspr r6, SPRN_MMCRA
  1511. /* Clear MMCRA in order to disable SDAR updates */
  1512. li r7, 0
  1513. mtspr SPRN_MMCRA, r7
  1514. isync
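/*
 * If there is no VPA, or the guest marked the PMU as in use
 * (PMCINUSE), save the full PMU state at 21: below; otherwise just
 * record MMCR0 with FC set so the counters stay frozen on restore.
 */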
  1515. beq 21f /* if no VPA, save PMU stuff anyway */
  1516. lbz r7, LPPACA_PMCINUSE(r8)
  1517. cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
  1518. bne 21f
  1519. std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
  1520. b 22f
  1521. 21: mfspr r5, SPRN_MMCR1
  1522. mfspr r7, SPRN_SIAR
  1523. mfspr r8, SPRN_SDAR
  1524. std r4, VCPU_MMCR(r9)
  1525. std r5, VCPU_MMCR + 8(r9)
  1526. std r6, VCPU_MMCR + 16(r9)
  1527. BEGIN_FTR_SECTION
  1528. std r10, VCPU_MMCR + 24(r9)
  1529. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  1530. std r7, VCPU_SIAR(r9)
  1531. std r8, VCPU_SDAR(r9)
  1532. mfspr r3, SPRN_PMC1
  1533. mfspr r4, SPRN_PMC2
  1534. mfspr r5, SPRN_PMC3
  1535. mfspr r6, SPRN_PMC4
  1536. mfspr r7, SPRN_PMC5
  1537. mfspr r8, SPRN_PMC6
  1538. stw r3, VCPU_PMC(r9)
  1539. stw r4, VCPU_PMC + 4(r9)
  1540. stw r5, VCPU_PMC + 8(r9)
  1541. stw r6, VCPU_PMC + 12(r9)
  1542. stw r7, VCPU_PMC + 16(r9)
  1543. stw r8, VCPU_PMC + 20(r9)
  1544. BEGIN_FTR_SECTION
  1545. mfspr r5, SPRN_SIER
  1546. std r5, VCPU_SIER(r9)
  1547. BEGIN_FTR_SECTION_NESTED(96)
  1548. mfspr r6, SPRN_SPMC1
  1549. mfspr r7, SPRN_SPMC2
  1550. mfspr r8, SPRN_MMCRS
  1551. stw r6, VCPU_PMC + 24(r9)
  1552. stw r7, VCPU_PMC + 28(r9)
  1553. std r8, VCPU_MMCR + 32(r9)
  1554. lis r4, 0x8000
  1555. mtspr SPRN_MMCRS, r4
  1556. END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
  1557. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  1558. 22:
  1559. /* Clear out SLB */
  1560. li r5,0
  1561. slbmte r5,r5
  1562. slbia
  1563. ptesync
  1564. /* Restore host values of some registers */
  1565. BEGIN_FTR_SECTION
  1566. ld r5, STACK_SLOT_CIABR(r1)
  1567. ld r6, STACK_SLOT_DAWR(r1)
  1568. ld r7, STACK_SLOT_DAWRX(r1)
  1569. mtspr SPRN_CIABR, r5
  1570. mtspr SPRN_DAWR, r6
  1571. mtspr SPRN_DAWRX, r7
  1572. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  1573. BEGIN_FTR_SECTION
  1574. ld r5, STACK_SLOT_TID(r1)
  1575. ld r6, STACK_SLOT_PSSCR(r1)
  1576. ld r7, STACK_SLOT_PID(r1)
  1577. ld r8, STACK_SLOT_IAMR(r1)
  1578. mtspr SPRN_TIDR, r5
  1579. mtspr SPRN_PSSCR, r6
  1580. mtspr SPRN_PID, r7
  1581. mtspr SPRN_IAMR, r8
  1582. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  1583. BEGIN_FTR_SECTION
  1584. PPC_INVALIDATE_ERAT
  1585. END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
  1586. /*
  1587. * POWER7/POWER8 guest -> host partition switch code.
  1588. * We don't have to lock against tlbies but we do
  1589. * have to coordinate the hardware threads.
  1590. */
  1591. kvmhv_switch_to_host:
  1592. /* Secondary threads wait for primary to do partition switch */
  1593. ld r5,HSTATE_KVM_VCORE(r13)
  1594. ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
  1595. lbz r3,HSTATE_PTID(r13)
  1596. cmpwi r3,0
  1597. beq 15f
  1598. HMT_LOW
  1599. 13: lbz r3,VCORE_IN_GUEST(r5)
  1600. cmpwi r3,0
  1601. bne 13b
  1602. HMT_MEDIUM
  1603. b 16f
  1604. /* Primary thread waits for all the secondaries to exit guest */
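/*
 * vcore->entry_exit_map holds a bitmap of threads that entered the
 * guest in its low byte and a bitmap of threads that have exited in
 * the next byte; spin until every thread that went in has come out.
 */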
  1605. 15: lwz r3,VCORE_ENTRY_EXIT(r5)
  1606. rlwinm r0,r3,32-8,0xff
  1607. clrldi r3,r3,56
  1608. cmpw r3,r0
  1609. bne 15b
  1610. isync
  1611. /* Did we actually switch to the guest at all? */
  1612. lbz r6, VCORE_IN_GUEST(r5)
  1613. cmpwi r6, 0
  1614. beq 19f
  1615. /* Primary thread switches back to host partition */
  1616. lwz r7,KVM_HOST_LPID(r4)
  1617. BEGIN_FTR_SECTION
  1618. ld r6,KVM_HOST_SDR1(r4)
  1619. li r8,LPID_RSVD /* switch to reserved LPID */
  1620. mtspr SPRN_LPID,r8
  1621. ptesync
  1622. mtspr SPRN_SDR1,r6 /* switch to host page table */
  1623. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
  1624. mtspr SPRN_LPID,r7
  1625. isync
  1626. BEGIN_FTR_SECTION
  1627. /* DPDES and VTB are shared between threads */
  1628. mfspr r7, SPRN_DPDES
  1629. mfspr r8, SPRN_VTB
  1630. std r7, VCORE_DPDES(r5)
  1631. std r8, VCORE_VTB(r5)
  1632. /* clear DPDES so we don't get guest doorbells in the host */
  1633. li r8, 0
  1634. mtspr SPRN_DPDES, r8
  1635. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  1636. /* If HMI, call kvmppc_realmode_hmi_handler() */
  1637. cmpwi r12, BOOK3S_INTERRUPT_HMI
  1638. bne 27f
  1639. bl kvmppc_realmode_hmi_handler
  1640. nop
  1641. li r12, BOOK3S_INTERRUPT_HMI
1642. /*
1643. * At this point kvmppc_realmode_hmi_handler() has already resynced
1644. * the timebase, so there is no need to subtract the guest timebase
1645. * offset here; skip it.
1646. *
1647. * Also, do not call kvmppc_subcore_exit_guest() here: it has already
1648. * been invoked from within kvmppc_realmode_hmi_handler().
1649. */
  1650. b 30f
  1651. 27:
  1652. /* Subtract timebase offset from timebase */
  1653. ld r8,VCORE_TB_OFFSET(r5)
  1654. cmpdi r8,0
  1655. beq 17f
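/*
 * Writing TBU40 replaces only the upper 40 bits of the timebase. If
 * the lower 24 bits wrap between the mftb and the mtspr, the carry
 * into the upper bits is lost, so detect the wrap below and add one
 * to the upper 40 bits.
 */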
  1656. mftb r6 /* current guest timebase */
  1657. subf r8,r8,r6
  1658. mtspr SPRN_TBU40,r8 /* update upper 40 bits */
  1659. mftb r7 /* check if lower 24 bits overflowed */
  1660. clrldi r6,r6,40
  1661. clrldi r7,r7,40
  1662. cmpld r7,r6
  1663. bge 17f
  1664. addis r8,r8,0x100 /* if so, increment upper 40 bits */
  1665. mtspr SPRN_TBU40,r8
  1666. 17: bl kvmppc_subcore_exit_guest
  1667. nop
  1668. 30: ld r5,HSTATE_KVM_VCORE(r13)
  1669. ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
  1670. /* Reset PCR */
  1671. ld r0, VCORE_PCR(r5)
  1672. cmpdi r0, 0
  1673. beq 18f
  1674. li r0, 0
  1675. mtspr SPRN_PCR, r0
  1676. 18:
  1677. /* Signal secondary CPUs to continue */
  1678. stb r0,VCORE_IN_GUEST(r5)
  1679. 19: lis r8,0x7fff /* MAX_INT@h */
  1680. mtspr SPRN_HDEC,r8
  1681. 16: ld r8,KVM_HOST_LPCR(r4)
  1682. mtspr SPRN_LPCR,r8
  1683. isync
  1684. /* load host SLB entries */
  1685. BEGIN_MMU_FTR_SECTION
  1686. b 0f
  1687. END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
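/* Reload the bolted host SLB entries from the SLB shadow area,
 * skipping any slot that is not marked valid. */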
  1688. ld r8,PACA_SLBSHADOWPTR(r13)
  1689. .rept SLB_NUM_BOLTED
  1690. li r3, SLBSHADOW_SAVEAREA
  1691. LDX_BE r5, r8, r3
  1692. addi r3, r3, 8
  1693. LDX_BE r6, r8, r3
  1694. andis. r7,r5,SLB_ESID_V@h
  1695. beq 1f
  1696. slbmte r6,r5
  1697. 1: addi r8,r8,16
  1698. .endr
  1699. 0:
  1700. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  1701. /* Finish timing, if we have a vcpu */
  1702. ld r4, HSTATE_KVM_VCPU(r13)
  1703. cmpdi r4, 0
  1704. li r3, 0
  1705. beq 2f
  1706. bl kvmhv_accumulate_time
  1707. 2:
  1708. #endif
  1709. /* Unset guest mode */
  1710. li r0, KVM_GUEST_MODE_NONE
  1711. stb r0, HSTATE_IN_GUEST(r13)
  1712. ld r0, SFS+PPC_LR_STKOFF(r1)
  1713. addi r1, r1, SFS
  1714. mtlr r0
  1715. blr
  1716. /*
  1717. * Check whether an HDSI is an HPTE not found fault or something else.
  1718. * If it is an HPTE not found fault that is due to the guest accessing
  1719. * a page that they have mapped but which we have paged out, then
  1720. * we continue on with the guest exit path. In all other cases,
  1721. * reflect the HDSI to the guest as a DSI.
  1722. */
  1723. kvmppc_hdsi:
  1724. ld r3, VCPU_KVM(r9)
  1725. lbz r0, KVM_RADIX(r3)
  1726. cmpwi r0, 0
  1727. mfspr r4, SPRN_HDAR
  1728. mfspr r6, SPRN_HDSISR
  1729. bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
  1730. /* HPTE not found fault or protection fault? */
  1731. andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
  1732. beq 1f /* if not, send it to the guest */
  1733. andi. r0, r11, MSR_DR /* data relocation enabled? */
  1734. beq 3f
  1735. BEGIN_FTR_SECTION
  1736. mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
  1737. b 4f
  1738. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
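/*
 * Pre-POWER9 hash guest with relocation on: look up the SLB entry
 * for the faulting address to get the VSID for the hash-table
 * search; if there is none, send the guest a data segment interrupt.
 */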
  1739. clrrdi r0, r4, 28
  1740. PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
  1741. li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
  1742. bne 7f /* if no SLB entry found */
  1743. 4: std r4, VCPU_FAULT_DAR(r9)
  1744. stw r6, VCPU_FAULT_DSISR(r9)
  1745. /* Search the hash table. */
  1746. mr r3, r9 /* vcpu pointer */
  1747. li r7, 1 /* data fault */
  1748. bl kvmppc_hpte_hv_fault
  1749. ld r9, HSTATE_KVM_VCPU(r13)
  1750. ld r10, VCPU_PC(r9)
  1751. ld r11, VCPU_MSR(r9)
  1752. li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
  1753. cmpdi r3, 0 /* retry the instruction */
  1754. beq 6f
  1755. cmpdi r3, -1 /* handle in kernel mode */
  1756. beq guest_exit_cont
  1757. cmpdi r3, -2 /* MMIO emulation; need instr word */
  1758. beq 2f
  1759. /* Synthesize a DSI (or DSegI) for the guest */
  1760. ld r4, VCPU_FAULT_DAR(r9)
  1761. mr r6, r3
  1762. 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
  1763. mtspr SPRN_DSISR, r6
  1764. 7: mtspr SPRN_DAR, r4
  1765. mtspr SPRN_SRR0, r10
  1766. mtspr SPRN_SRR1, r11
  1767. mr r10, r0
  1768. bl kvmppc_msr_interrupt
  1769. fast_interrupt_c_return:
  1770. 6: ld r7, VCPU_CTR(r9)
  1771. ld r8, VCPU_XER(r9)
  1772. mtctr r7
  1773. mtxer r8
  1774. mr r4, r9
  1775. b fast_guest_return
  1776. 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
  1777. ld r5, KVM_VRMA_SLB_V(r5)
  1778. b 4b
  1779. /* If this is for emulated MMIO, load the instruction word */
  1780. 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
  1781. /* Set guest mode to 'jump over instruction' so if lwz faults
  1782. * we'll just continue at the next IP. */
  1783. li r0, KVM_GUEST_MODE_SKIP
  1784. stb r0, HSTATE_IN_GUEST(r13)
  1785. /* Do the access with MSR:DR enabled */
  1786. mfmsr r3
  1787. ori r4, r3, MSR_DR /* Enable paging for data */
  1788. mtmsrd r4
  1789. lwz r8, 0(r10)
  1790. mtmsrd r3
  1791. /* Store the result */
  1792. stw r8, VCPU_LAST_INST(r9)
  1793. /* Unset guest mode. */
  1794. li r0, KVM_GUEST_MODE_HOST_HV
  1795. stb r0, HSTATE_IN_GUEST(r13)
  1796. b guest_exit_cont
  1797. .Lradix_hdsi:
  1798. std r4, VCPU_FAULT_DAR(r9)
  1799. stw r6, VCPU_FAULT_DSISR(r9)
  1800. .Lradix_hisi:
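/* The ASDR value is saved as the guest physical address of the fault */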
  1801. mfspr r5, SPRN_ASDR
  1802. std r5, VCPU_FAULT_GPA(r9)
  1803. b guest_exit_cont
  1804. /*
  1805. * Similarly for an HISI, reflect it to the guest as an ISI unless
  1806. * it is an HPTE not found fault for a page that we have paged out.
  1807. */
  1808. kvmppc_hisi:
  1809. ld r3, VCPU_KVM(r9)
  1810. lbz r0, KVM_RADIX(r3)
  1811. cmpwi r0, 0
  1812. bne .Lradix_hisi /* for radix, just save ASDR */
  1813. andis. r0, r11, SRR1_ISI_NOPT@h
  1814. beq 1f
  1815. andi. r0, r11, MSR_IR /* instruction relocation enabled? */
  1816. beq 3f
  1817. BEGIN_FTR_SECTION
  1818. mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
  1819. b 4f
  1820. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  1821. clrrdi r0, r10, 28
  1822. PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
  1823. li r0, BOOK3S_INTERRUPT_INST_SEGMENT
  1824. bne 7f /* if no SLB entry found */
  1825. 4:
  1826. /* Search the hash table. */
  1827. mr r3, r9 /* vcpu pointer */
  1828. mr r4, r10
  1829. mr r6, r11
  1830. li r7, 0 /* instruction fault */
  1831. bl kvmppc_hpte_hv_fault
  1832. ld r9, HSTATE_KVM_VCPU(r13)
  1833. ld r10, VCPU_PC(r9)
  1834. ld r11, VCPU_MSR(r9)
  1835. li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
  1836. cmpdi r3, 0 /* retry the instruction */
  1837. beq fast_interrupt_c_return
  1838. cmpdi r3, -1 /* handle in kernel mode */
  1839. beq guest_exit_cont
  1840. /* Synthesize an ISI (or ISegI) for the guest */
  1841. mr r11, r3
  1842. 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE
  1843. 7: mtspr SPRN_SRR0, r10
  1844. mtspr SPRN_SRR1, r11
  1845. mr r10, r0
  1846. bl kvmppc_msr_interrupt
  1847. b fast_interrupt_c_return
  1848. 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
  1849. ld r5, KVM_VRMA_SLB_V(r6)
  1850. b 4b
  1851. /*
  1852. * Try to handle an hcall in real mode.
  1853. * Returns to the guest if we handle it, or continues on up to
  1854. * the kernel if we can't (i.e. if we don't have a handler for
  1855. * it, or if the handler returns H_TOO_HARD).
  1856. *
  1857. * r5 - r8 contain hcall args,
  1858. * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
  1859. */
  1860. hcall_try_real_mode:
  1861. ld r3,VCPU_GPR(R3)(r9)
  1862. andi. r0,r11,MSR_PR
  1863. /* sc 1 from userspace - reflect to guest syscall */
  1864. bne sc_1_fast_return
  1865. clrrdi r3,r3,2
  1866. cmpldi r3,hcall_real_table_end - hcall_real_table
  1867. bge guest_exit_cont
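/*
 * r3 holds the hcall number (a multiple of 4); it serves directly as
 * the byte offset into hcall_real_table, and r3 / 4 is the bit
 * number tested in kvm->arch.enabled_hcalls[] below.
 */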
  1868. /* See if this hcall is enabled for in-kernel handling */
  1869. ld r4, VCPU_KVM(r9)
  1870. srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
  1871. sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
  1872. add r4, r4, r0
  1873. ld r0, KVM_ENABLED_HCALLS(r4)
  1874. rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
  1875. srd r0, r0, r4
  1876. andi. r0, r0, 1
  1877. beq guest_exit_cont
  1878. /* Get pointer to handler, if any, and call it */
  1879. LOAD_REG_ADDR(r4, hcall_real_table)
  1880. lwax r3,r3,r4
  1881. cmpwi r3,0
  1882. beq guest_exit_cont
  1883. add r12,r3,r4
  1884. mtctr r12
  1885. mr r3,r9 /* get vcpu pointer */
  1886. ld r4,VCPU_GPR(R4)(r9)
  1887. bctrl
  1888. cmpdi r3,H_TOO_HARD
  1889. beq hcall_real_fallback
  1890. ld r4,HSTATE_KVM_VCPU(r13)
  1891. std r3,VCPU_GPR(R3)(r4)
  1892. ld r10,VCPU_PC(r4)
  1893. ld r11,VCPU_MSR(r4)
  1894. b fast_guest_return
  1895. sc_1_fast_return:
  1896. mtspr SPRN_SRR0,r10
  1897. mtspr SPRN_SRR1,r11
  1898. li r10, BOOK3S_INTERRUPT_SYSCALL
  1899. bl kvmppc_msr_interrupt
  1900. mr r4,r9
  1901. b fast_guest_return
1902. /* We've attempted a real mode hcall, but the handler has punted it
1903. * back to userspace. Restore the clobbered volatile registers
1904. * before resuming the pass-it-to-qemu path. */
  1905. hcall_real_fallback:
  1906. li r12,BOOK3S_INTERRUPT_SYSCALL
  1907. ld r9, HSTATE_KVM_VCPU(r13)
  1908. b guest_exit_cont
  1909. .globl hcall_real_table
  1910. hcall_real_table:
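/*
 * Each entry is a 32-bit offset from hcall_real_table, or 0 if there
 * is no real-mode handler. Since hcall numbers are multiples of 4,
 * the entry for hcall N lives at byte offset N.
 */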
  1911. .long 0 /* 0 - unused */
  1912. .long DOTSYM(kvmppc_h_remove) - hcall_real_table
  1913. .long DOTSYM(kvmppc_h_enter) - hcall_real_table
  1914. .long DOTSYM(kvmppc_h_read) - hcall_real_table
  1915. .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
  1916. .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
  1917. .long DOTSYM(kvmppc_h_protect) - hcall_real_table
  1918. .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
  1919. .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
  1920. .long 0 /* 0x24 - H_SET_SPRG0 */
  1921. .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
  1922. .long 0 /* 0x2c */
  1923. .long 0 /* 0x30 */
  1924. .long 0 /* 0x34 */
  1925. .long 0 /* 0x38 */
  1926. .long 0 /* 0x3c */
  1927. .long 0 /* 0x40 */
  1928. .long 0 /* 0x44 */
  1929. .long 0 /* 0x48 */
  1930. .long 0 /* 0x4c */
  1931. .long 0 /* 0x50 */
  1932. .long 0 /* 0x54 */
  1933. .long 0 /* 0x58 */
  1934. .long 0 /* 0x5c */
  1935. .long 0 /* 0x60 */
  1936. #ifdef CONFIG_KVM_XICS
  1937. .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
  1938. .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
  1939. .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
  1940. .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
  1941. .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
  1942. #else
  1943. .long 0 /* 0x64 - H_EOI */
  1944. .long 0 /* 0x68 - H_CPPR */
  1945. .long 0 /* 0x6c - H_IPI */
  1946. .long 0 /* 0x70 - H_IPOLL */
  1947. .long 0 /* 0x74 - H_XIRR */
  1948. #endif
  1949. .long 0 /* 0x78 */
  1950. .long 0 /* 0x7c */
  1951. .long 0 /* 0x80 */
  1952. .long 0 /* 0x84 */
  1953. .long 0 /* 0x88 */
  1954. .long 0 /* 0x8c */
  1955. .long 0 /* 0x90 */
  1956. .long 0 /* 0x94 */
  1957. .long 0 /* 0x98 */
  1958. .long 0 /* 0x9c */
  1959. .long 0 /* 0xa0 */
  1960. .long 0 /* 0xa4 */
  1961. .long 0 /* 0xa8 */
  1962. .long 0 /* 0xac */
  1963. .long 0 /* 0xb0 */
  1964. .long 0 /* 0xb4 */
  1965. .long 0 /* 0xb8 */
  1966. .long 0 /* 0xbc */
  1967. .long 0 /* 0xc0 */
  1968. .long 0 /* 0xc4 */
  1969. .long 0 /* 0xc8 */
  1970. .long 0 /* 0xcc */
  1971. .long 0 /* 0xd0 */
  1972. .long 0 /* 0xd4 */
  1973. .long 0 /* 0xd8 */
  1974. .long 0 /* 0xdc */
  1975. .long DOTSYM(kvmppc_h_cede) - hcall_real_table
  1976. .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
  1977. .long 0 /* 0xe8 */
  1978. .long 0 /* 0xec */
  1979. .long 0 /* 0xf0 */
  1980. .long 0 /* 0xf4 */
  1981. .long 0 /* 0xf8 */
  1982. .long 0 /* 0xfc */
  1983. .long 0 /* 0x100 */
  1984. .long 0 /* 0x104 */
  1985. .long 0 /* 0x108 */
  1986. .long 0 /* 0x10c */
  1987. .long 0 /* 0x110 */
  1988. .long 0 /* 0x114 */
  1989. .long 0 /* 0x118 */
  1990. .long 0 /* 0x11c */
  1991. .long 0 /* 0x120 */
  1992. .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
  1993. .long 0 /* 0x128 */
  1994. .long 0 /* 0x12c */
  1995. .long 0 /* 0x130 */
  1996. .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
  1997. .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
  1998. .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
  1999. .long 0 /* 0x140 */
  2000. .long 0 /* 0x144 */
  2001. .long 0 /* 0x148 */
  2002. .long 0 /* 0x14c */
  2003. .long 0 /* 0x150 */
  2004. .long 0 /* 0x154 */
  2005. .long 0 /* 0x158 */
  2006. .long 0 /* 0x15c */
  2007. .long 0 /* 0x160 */
  2008. .long 0 /* 0x164 */
  2009. .long 0 /* 0x168 */
  2010. .long 0 /* 0x16c */
  2011. .long 0 /* 0x170 */
  2012. .long 0 /* 0x174 */
  2013. .long 0 /* 0x178 */
  2014. .long 0 /* 0x17c */
  2015. .long 0 /* 0x180 */
  2016. .long 0 /* 0x184 */
  2017. .long 0 /* 0x188 */
  2018. .long 0 /* 0x18c */
  2019. .long 0 /* 0x190 */
  2020. .long 0 /* 0x194 */
  2021. .long 0 /* 0x198 */
  2022. .long 0 /* 0x19c */
  2023. .long 0 /* 0x1a0 */
  2024. .long 0 /* 0x1a4 */
  2025. .long 0 /* 0x1a8 */
  2026. .long 0 /* 0x1ac */
  2027. .long 0 /* 0x1b0 */
  2028. .long 0 /* 0x1b4 */
  2029. .long 0 /* 0x1b8 */
  2030. .long 0 /* 0x1bc */
  2031. .long 0 /* 0x1c0 */
  2032. .long 0 /* 0x1c4 */
  2033. .long 0 /* 0x1c8 */
  2034. .long 0 /* 0x1cc */
  2035. .long 0 /* 0x1d0 */
  2036. .long 0 /* 0x1d4 */
  2037. .long 0 /* 0x1d8 */
  2038. .long 0 /* 0x1dc */
  2039. .long 0 /* 0x1e0 */
  2040. .long 0 /* 0x1e4 */
  2041. .long 0 /* 0x1e8 */
  2042. .long 0 /* 0x1ec */
  2043. .long 0 /* 0x1f0 */
  2044. .long 0 /* 0x1f4 */
  2045. .long 0 /* 0x1f8 */
  2046. .long 0 /* 0x1fc */
  2047. .long 0 /* 0x200 */
  2048. .long 0 /* 0x204 */
  2049. .long 0 /* 0x208 */
  2050. .long 0 /* 0x20c */
  2051. .long 0 /* 0x210 */
  2052. .long 0 /* 0x214 */
  2053. .long 0 /* 0x218 */
  2054. .long 0 /* 0x21c */
  2055. .long 0 /* 0x220 */
  2056. .long 0 /* 0x224 */
  2057. .long 0 /* 0x228 */
  2058. .long 0 /* 0x22c */
  2059. .long 0 /* 0x230 */
  2060. .long 0 /* 0x234 */
  2061. .long 0 /* 0x238 */
  2062. .long 0 /* 0x23c */
  2063. .long 0 /* 0x240 */
  2064. .long 0 /* 0x244 */
  2065. .long 0 /* 0x248 */
  2066. .long 0 /* 0x24c */
  2067. .long 0 /* 0x250 */
  2068. .long 0 /* 0x254 */
  2069. .long 0 /* 0x258 */
  2070. .long 0 /* 0x25c */
  2071. .long 0 /* 0x260 */
  2072. .long 0 /* 0x264 */
  2073. .long 0 /* 0x268 */
  2074. .long 0 /* 0x26c */
  2075. .long 0 /* 0x270 */
  2076. .long 0 /* 0x274 */
  2077. .long 0 /* 0x278 */
  2078. .long 0 /* 0x27c */
  2079. .long 0 /* 0x280 */
  2080. .long 0 /* 0x284 */
  2081. .long 0 /* 0x288 */
  2082. .long 0 /* 0x28c */
  2083. .long 0 /* 0x290 */
  2084. .long 0 /* 0x294 */
  2085. .long 0 /* 0x298 */
  2086. .long 0 /* 0x29c */
  2087. .long 0 /* 0x2a0 */
  2088. .long 0 /* 0x2a4 */
  2089. .long 0 /* 0x2a8 */
  2090. .long 0 /* 0x2ac */
  2091. .long 0 /* 0x2b0 */
  2092. .long 0 /* 0x2b4 */
  2093. .long 0 /* 0x2b8 */
  2094. .long 0 /* 0x2bc */
  2095. .long 0 /* 0x2c0 */
  2096. .long 0 /* 0x2c4 */
  2097. .long 0 /* 0x2c8 */
  2098. .long 0 /* 0x2cc */
  2099. .long 0 /* 0x2d0 */
  2100. .long 0 /* 0x2d4 */
  2101. .long 0 /* 0x2d8 */
  2102. .long 0 /* 0x2dc */
  2103. .long 0 /* 0x2e0 */
  2104. .long 0 /* 0x2e4 */
  2105. .long 0 /* 0x2e8 */
  2106. .long 0 /* 0x2ec */
  2107. .long 0 /* 0x2f0 */
  2108. .long 0 /* 0x2f4 */
  2109. .long 0 /* 0x2f8 */
  2110. #ifdef CONFIG_KVM_XICS
  2111. .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
  2112. #else
  2113. .long 0 /* 0x2fc - H_XIRR_X*/
  2114. #endif
  2115. .long DOTSYM(kvmppc_h_random) - hcall_real_table
  2116. .globl hcall_real_table_end
  2117. hcall_real_table_end:
  2118. _GLOBAL(kvmppc_h_set_xdabr)
  2119. andi. r0, r5, DABRX_USER | DABRX_KERNEL
  2120. beq 6f
  2121. li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
  2122. andc. r0, r5, r0
  2123. beq 3f
  2124. 6: li r3, H_PARAMETER
  2125. blr
  2126. _GLOBAL(kvmppc_h_set_dabr)
  2127. li r5, DABRX_USER | DABRX_KERNEL
  2128. 3:
  2129. BEGIN_FTR_SECTION
  2130. b 2f
  2131. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  2132. std r4,VCPU_DABR(r3)
  2133. stw r5, VCPU_DABRX(r3)
  2134. mtspr SPRN_DABRX, r5
  2135. /* Work around P7 bug where DABR can get corrupted on mtspr */
  2136. 1: mtspr SPRN_DABR,r4
  2137. mfspr r5, SPRN_DABR
  2138. cmpd r4, r5
  2139. bne 1b
  2140. isync
  2141. li r3,0
  2142. blr
  2143. /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
  2144. 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
  2145. rlwimi r5, r4, 2, DAWRX_WT
  2146. clrrdi r4, r4, 3
  2147. std r4, VCPU_DAWR(r3)
  2148. std r5, VCPU_DAWRX(r3)
  2149. mtspr SPRN_DAWR, r4
  2150. mtspr SPRN_DAWRX, r5
  2151. li r3, 0
  2152. blr
  2153. _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
  2154. ori r11,r11,MSR_EE
  2155. std r11,VCPU_MSR(r3)
  2156. li r0,1
  2157. stb r0,VCPU_CEDED(r3)
  2158. sync /* order setting ceded vs. testing prodded */
  2159. lbz r5,VCPU_PRODDED(r3)
  2160. cmpwi r5,0
  2161. bne kvm_cede_prodded
  2162. li r12,0 /* set trap to 0 to say hcall is handled */
  2163. stw r12,VCPU_TRAP(r3)
  2164. li r0,H_SUCCESS
  2165. std r0,VCPU_GPR(R3)(r3)
  2166. /*
  2167. * Set our bit in the bitmask of napping threads unless all the
  2168. * other threads are already napping, in which case we send this
  2169. * up to the host.
  2170. */
  2171. ld r5,HSTATE_KVM_VCORE(r13)
  2172. lbz r6,HSTATE_PTID(r13)
  2173. lwz r8,VCORE_ENTRY_EXIT(r5)
  2174. clrldi r8,r8,56
  2175. li r0,1
  2176. sld r0,r0,r6
  2177. addi r6,r5,VCORE_NAPPING_THREADS
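/* lwarx/stwcx. loop: atomically OR our bit into vcore->napping_threads */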
  2178. 31: lwarx r4,0,r6
  2179. or r4,r4,r0
  2180. cmpw r4,r8
  2181. beq kvm_cede_exit
  2182. stwcx. r4,0,r6
  2183. bne 31b
  2184. /* order napping_threads update vs testing entry_exit_map */
  2185. isync
  2186. li r0,NAPPING_CEDE
  2187. stb r0,HSTATE_NAPPING(r13)
  2188. lwz r7,VCORE_ENTRY_EXIT(r5)
  2189. cmpwi r7,0x100
  2190. bge 33f /* another thread already exiting */
  2191. /*
  2192. * Although not specifically required by the architecture, POWER7
  2193. * preserves the following registers in nap mode, even if an SMT mode
  2194. * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
  2195. * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
  2196. */
  2197. /* Save non-volatile GPRs */
  2198. std r14, VCPU_GPR(R14)(r3)
  2199. std r15, VCPU_GPR(R15)(r3)
  2200. std r16, VCPU_GPR(R16)(r3)
  2201. std r17, VCPU_GPR(R17)(r3)
  2202. std r18, VCPU_GPR(R18)(r3)
  2203. std r19, VCPU_GPR(R19)(r3)
  2204. std r20, VCPU_GPR(R20)(r3)
  2205. std r21, VCPU_GPR(R21)(r3)
  2206. std r22, VCPU_GPR(R22)(r3)
  2207. std r23, VCPU_GPR(R23)(r3)
  2208. std r24, VCPU_GPR(R24)(r3)
  2209. std r25, VCPU_GPR(R25)(r3)
  2210. std r26, VCPU_GPR(R26)(r3)
  2211. std r27, VCPU_GPR(R27)(r3)
  2212. std r28, VCPU_GPR(R28)(r3)
  2213. std r29, VCPU_GPR(R29)(r3)
  2214. std r30, VCPU_GPR(R30)(r3)
  2215. std r31, VCPU_GPR(R31)(r3)
  2216. /* save FP state */
  2217. bl kvmppc_save_fp
  2218. #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  2219. BEGIN_FTR_SECTION
  2220. ld r9, HSTATE_KVM_VCPU(r13)
  2221. bl kvmppc_save_tm
  2222. END_FTR_SECTION_IFSET(CPU_FTR_TM)
  2223. #endif
  2224. /*
  2225. * Set DEC to the smaller of DEC and HDEC, so that we wake
  2226. * no later than the end of our timeslice (HDEC interrupts
  2227. * don't wake us from nap).
  2228. */
  2229. mfspr r3, SPRN_DEC
  2230. mfspr r4, SPRN_HDEC
  2231. mftb r5
  2232. BEGIN_FTR_SECTION
  2233. /* On P9 check whether the guest has large decrementer mode enabled */
  2234. ld r6, HSTATE_KVM_VCORE(r13)
  2235. ld r6, VCORE_LPCR(r6)
  2236. andis. r6, r6, LPCR_LD@h
  2237. bne 68f
  2238. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  2239. extsw r3, r3
  2240. 68: EXTEND_HDEC(r4)
  2241. cmpd r3, r4
  2242. ble 67f
  2243. mtspr SPRN_DEC, r4
  2244. 67:
  2245. /* save expiry time of guest decrementer */
  2246. add r3, r3, r5
  2247. ld r4, HSTATE_KVM_VCPU(r13)
  2248. ld r5, HSTATE_KVM_VCORE(r13)
  2249. ld r6, VCORE_TB_OFFSET(r5)
  2250. subf r3, r6, r3 /* convert to host TB value */
  2251. std r3, VCPU_DEC_EXPIRES(r4)
  2252. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  2253. ld r4, HSTATE_KVM_VCPU(r13)
  2254. addi r3, r4, VCPU_TB_CEDE
  2255. bl kvmhv_accumulate_time
  2256. #endif
  2257. lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
  2258. /*
2259. * Take a nap until a decrementer or external or doorbell interrupt
  2260. * occurs, with PECE1 and PECE0 set in LPCR.
  2261. * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
  2262. * Also clear the runlatch bit before napping.
  2263. */
  2264. kvm_do_nap:
  2265. mfspr r0, SPRN_CTRLF
  2266. clrrdi r0, r0, 1
  2267. mtspr SPRN_CTRLT, r0
  2268. li r0,1
  2269. stb r0,HSTATE_HWTHREAD_REQ(r13)
  2270. mfspr r5,SPRN_LPCR
  2271. ori r5,r5,LPCR_PECE0 | LPCR_PECE1
  2272. BEGIN_FTR_SECTION
  2273. ori r5, r5, LPCR_PECEDH
  2274. rlwimi r5, r3, 0, LPCR_PECEDP
  2275. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  2276. kvm_nap_sequence: /* desired LPCR value in r5 */
  2277. BEGIN_FTR_SECTION
  2278. /*
  2279. * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
  2280. * enable state loss = 1 (allow SMT mode switch)
  2281. * requested level = 0 (just stop dispatching)
  2282. */
  2283. lis r3, (PSSCR_EC | PSSCR_ESL)@h
  2284. mtspr SPRN_PSSCR, r3
  2285. /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
  2286. li r4, LPCR_PECE_HVEE@higher
  2287. sldi r4, r4, 32
  2288. or r5, r5, r4
  2289. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  2290. mtspr SPRN_LPCR,r5
  2291. isync
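/*
 * Standard power-saving entry sequence: store to a scratch slot,
 * ptesync, reload it, then a compare/branch that can never be taken,
 * so the store is known to have completed before nap/stop.
 */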
  2292. li r0, 0
  2293. std r0, HSTATE_SCRATCH0(r13)
  2294. ptesync
  2295. ld r0, HSTATE_SCRATCH0(r13)
  2296. 1: cmpd r0, r0
  2297. bne 1b
  2298. BEGIN_FTR_SECTION
  2299. nap
  2300. FTR_SECTION_ELSE
  2301. PPC_STOP
  2302. ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
  2303. b .
  2304. 33: mr r4, r3
  2305. li r3, 0
  2306. li r12, 0
  2307. b 34f
  2308. kvm_end_cede:
  2309. /* get vcpu pointer */
  2310. ld r4, HSTATE_KVM_VCPU(r13)
  2311. /* Woken by external or decrementer interrupt */
  2312. ld r1, HSTATE_HOST_R1(r13)
  2313. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  2314. addi r3, r4, VCPU_TB_RMINTR
  2315. bl kvmhv_accumulate_time
  2316. #endif
  2317. #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  2318. BEGIN_FTR_SECTION
  2319. bl kvmppc_restore_tm
  2320. END_FTR_SECTION_IFSET(CPU_FTR_TM)
  2321. #endif
  2322. /* load up FP state */
  2323. bl kvmppc_load_fp
  2324. /* Restore guest decrementer */
  2325. ld r3, VCPU_DEC_EXPIRES(r4)
  2326. ld r5, HSTATE_KVM_VCORE(r13)
  2327. ld r6, VCORE_TB_OFFSET(r5)
  2328. add r3, r3, r6 /* convert host TB to guest TB value */
  2329. mftb r7
  2330. subf r3, r7, r3
  2331. mtspr SPRN_DEC, r3
  2332. /* Load NV GPRS */
  2333. ld r14, VCPU_GPR(R14)(r4)
  2334. ld r15, VCPU_GPR(R15)(r4)
  2335. ld r16, VCPU_GPR(R16)(r4)
  2336. ld r17, VCPU_GPR(R17)(r4)
  2337. ld r18, VCPU_GPR(R18)(r4)
  2338. ld r19, VCPU_GPR(R19)(r4)
  2339. ld r20, VCPU_GPR(R20)(r4)
  2340. ld r21, VCPU_GPR(R21)(r4)
  2341. ld r22, VCPU_GPR(R22)(r4)
  2342. ld r23, VCPU_GPR(R23)(r4)
  2343. ld r24, VCPU_GPR(R24)(r4)
  2344. ld r25, VCPU_GPR(R25)(r4)
  2345. ld r26, VCPU_GPR(R26)(r4)
  2346. ld r27, VCPU_GPR(R27)(r4)
  2347. ld r28, VCPU_GPR(R28)(r4)
  2348. ld r29, VCPU_GPR(R29)(r4)
  2349. ld r30, VCPU_GPR(R30)(r4)
  2350. ld r31, VCPU_GPR(R31)(r4)
  2351. /* Check the wake reason in SRR1 to see why we got here */
  2352. bl kvmppc_check_wake_reason
  2353. /*
  2354. * Restore volatile registers since we could have called a
  2355. * C routine in kvmppc_check_wake_reason
  2356. * r4 = VCPU
  2357. * r3 tells us whether we need to return to host or not
2358. * WARNING: r3 gets checked further down;
2359. * do not modify it until that check is done.
  2360. */
  2361. ld r4, HSTATE_KVM_VCPU(r13)
  2362. /* clear our bit in vcore->napping_threads */
  2363. 34: ld r5,HSTATE_KVM_VCORE(r13)
  2364. lbz r7,HSTATE_PTID(r13)
  2365. li r0,1
  2366. sld r0,r0,r7
  2367. addi r6,r5,VCORE_NAPPING_THREADS
  2368. 32: lwarx r7,0,r6
  2369. andc r7,r7,r0
  2370. stwcx. r7,0,r6
  2371. bne 32b
  2372. li r0,0
  2373. stb r0,HSTATE_NAPPING(r13)
  2374. /* See if the wake reason saved in r3 means we need to exit */
  2375. stw r12, VCPU_TRAP(r4)
  2376. mr r9, r4
  2377. cmpdi r3, 0
  2378. bgt guest_exit_cont
  2379. /* see if any other thread is already exiting */
  2380. lwz r0,VCORE_ENTRY_EXIT(r5)
  2381. cmpwi r0,0x100
  2382. bge guest_exit_cont
  2383. b kvmppc_cede_reentry /* if not go back to guest */
  2384. /* cede when already previously prodded case */
  2385. kvm_cede_prodded:
  2386. li r0,0
  2387. stb r0,VCPU_PRODDED(r3)
  2388. sync /* order testing prodded vs. clearing ceded */
  2389. stb r0,VCPU_CEDED(r3)
  2390. li r3,H_SUCCESS
  2391. blr
  2392. /* we've ceded but we want to give control to the host */
  2393. kvm_cede_exit:
  2394. ld r9, HSTATE_KVM_VCPU(r13)
  2395. b guest_exit_cont
  2396. /* Try to handle a machine check in real mode */
  2397. machine_check_realmode:
  2398. mr r3, r9 /* get vcpu pointer */
  2399. bl kvmppc_realmode_machine_check
  2400. nop
  2401. ld r9, HSTATE_KVM_VCPU(r13)
  2402. li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
2403. /*
2404. * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through a
2405. * machine check interrupt (set HSRR0 to 0x200). For handled
2406. * (non-fatal) errors, just go back to guest execution with the current
2407. * HSRR0 instead of exiting the guest. This approach injects a machine
2408. * check into the guest for a fatal error, causing the guest to crash.
2409. *
2410. * The old code used to return to the host for unhandled errors, which
2411. * caused the guest to hang with soft lockups and made it difficult
2412. * to recover the guest instance.
2413. *
2414. * If we receive a machine check with MSR[RI] = 0, deliver it to the
2415. * guest as a machine check, causing the guest to crash.
2416. */
  2417. ld r11, VCPU_MSR(r9)
  2418. rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
  2419. bne mc_cont /* if so, exit to host */
  2420. andi. r10, r11, MSR_RI /* check for unrecoverable exception */
  2421. beq 1f /* Deliver a machine check to guest */
  2422. ld r10, VCPU_PC(r9)
  2423. cmpdi r3, 0 /* Did we handle MCE ? */
  2424. bne 2f /* Continue guest execution. */
  2425. /* If not, deliver a machine check. SRR0/1 are already set */
  2426. 1: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
  2427. bl kvmppc_msr_interrupt
  2428. 2: b fast_interrupt_c_return
  2429. /*
  2430. * Check the reason we woke from nap, and take appropriate action.
  2431. * Returns (in r3):
  2432. * 0 if nothing needs to be done
  2433. * 1 if something happened that needs to be handled by the host
  2434. * -1 if there was a guest wakeup (IPI or msgsnd)
  2435. * -2 if we handled a PCI passthrough interrupt (returned by
  2436. * kvmppc_read_intr only)
  2437. *
  2438. * Also sets r12 to the interrupt vector for any interrupt that needs
  2439. * to be handled now by the host (0x500 for external interrupt), or zero.
  2440. * Modifies all volatile registers (since it may call a C function).
  2441. * This routine calls kvmppc_read_intr, a C function, if an external
  2442. * interrupt is pending.
  2443. */
  2444. kvmppc_check_wake_reason:
  2445. mfspr r6, SPRN_SRR1
  2446. BEGIN_FTR_SECTION
  2447. rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
  2448. FTR_SECTION_ELSE
  2449. rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
  2450. ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
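/*
 * SRR1 wake reason values tested below: 8 = external interrupt,
 * 6 = decrementer, 5 = privileged doorbell, 3 = hypervisor doorbell,
 * 0xa = hypervisor maintenance interrupt.
 */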
  2451. cmpwi r6, 8 /* was it an external interrupt? */
  2452. beq 7f /* if so, see what it was */
  2453. li r3, 0
  2454. li r12, 0
  2455. cmpwi r6, 6 /* was it the decrementer? */
  2456. beq 0f
  2457. BEGIN_FTR_SECTION
  2458. cmpwi r6, 5 /* privileged doorbell? */
  2459. beq 0f
  2460. cmpwi r6, 3 /* hypervisor doorbell? */
  2461. beq 3f
  2462. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  2463. cmpwi r6, 0xa /* Hypervisor maintenance ? */
  2464. beq 4f
  2465. li r3, 1 /* anything else, return 1 */
  2466. 0: blr
  2467. /* hypervisor doorbell */
  2468. 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
  2469. /*
  2470. * Clear the doorbell as we will invoke the handler
  2471. * explicitly in the guest exit path.
  2472. */
  2473. lis r6, (PPC_DBELL_SERVER << (63-36))@h
  2474. PPC_MSGCLR(6)
  2475. /* see if it's a host IPI */
  2476. li r3, 1
  2477. lbz r0, HSTATE_HOST_IPI(r13)
  2478. cmpwi r0, 0
  2479. bnelr
  2480. /* if not, return -1 */
  2481. li r3, -1
  2482. blr
  2483. /* Woken up due to Hypervisor maintenance interrupt */
  2484. 4: li r12, BOOK3S_INTERRUPT_HMI
  2485. li r3, 1
  2486. blr
  2487. /* external interrupt - create a stack frame so we can call C */
  2488. 7: mflr r0
  2489. std r0, PPC_LR_STKOFF(r1)
  2490. stdu r1, -PPC_MIN_STKFRM(r1)
  2491. bl kvmppc_read_intr
  2492. nop
  2493. li r12, BOOK3S_INTERRUPT_EXTERNAL
  2494. cmpdi r3, 1
  2495. ble 1f
2496. /*
2497. * A return code of 2 means a PCI passthrough interrupt, but
2498. * we need to return to the host to complete handling the
2499. * interrupt. The guest exit code expects the trap reason
2500. * in r12.
2501. */
  2502. li r12, BOOK3S_INTERRUPT_HV_RM_HARD
  2503. 1:
  2504. ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
  2505. addi r1, r1, PPC_MIN_STKFRM
  2506. mtlr r0
  2507. blr
  2508. /*
  2509. * Save away FP, VMX and VSX registers.
  2510. * r3 = vcpu pointer
  2511. * N.B. r30 and r31 are volatile across this function,
  2512. * thus it is not callable from C.
  2513. */
  2514. kvmppc_save_fp:
  2515. mflr r30
  2516. mr r31,r3
  2517. mfmsr r5
  2518. ori r8,r5,MSR_FP
  2519. #ifdef CONFIG_ALTIVEC
  2520. BEGIN_FTR_SECTION
  2521. oris r8,r8,MSR_VEC@h
  2522. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  2523. #endif
  2524. #ifdef CONFIG_VSX
  2525. BEGIN_FTR_SECTION
  2526. oris r8,r8,MSR_VSX@h
  2527. END_FTR_SECTION_IFSET(CPU_FTR_VSX)
  2528. #endif
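/* Enable FP (and VEC/VSX where present) in the MSR so the register
 * state below is accessible */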
  2529. mtmsrd r8
  2530. addi r3,r3,VCPU_FPRS
  2531. bl store_fp_state
  2532. #ifdef CONFIG_ALTIVEC
  2533. BEGIN_FTR_SECTION
  2534. addi r3,r31,VCPU_VRS
  2535. bl store_vr_state
  2536. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  2537. #endif
  2538. mfspr r6,SPRN_VRSAVE
  2539. stw r6,VCPU_VRSAVE(r31)
  2540. mtlr r30
  2541. blr
  2542. /*
  2543. * Load up FP, VMX and VSX registers
  2544. * r4 = vcpu pointer
  2545. * N.B. r30 and r31 are volatile across this function,
  2546. * thus it is not callable from C.
  2547. */
  2548. kvmppc_load_fp:
  2549. mflr r30
  2550. mr r31,r4
  2551. mfmsr r9
  2552. ori r8,r9,MSR_FP
  2553. #ifdef CONFIG_ALTIVEC
  2554. BEGIN_FTR_SECTION
  2555. oris r8,r8,MSR_VEC@h
  2556. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  2557. #endif
  2558. #ifdef CONFIG_VSX
  2559. BEGIN_FTR_SECTION
  2560. oris r8,r8,MSR_VSX@h
  2561. END_FTR_SECTION_IFSET(CPU_FTR_VSX)
  2562. #endif
  2563. mtmsrd r8
  2564. addi r3,r4,VCPU_FPRS
  2565. bl load_fp_state
  2566. #ifdef CONFIG_ALTIVEC
  2567. BEGIN_FTR_SECTION
  2568. addi r3,r31,VCPU_VRS
  2569. bl load_vr_state
  2570. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  2571. #endif
  2572. lwz r7,VCPU_VRSAVE(r31)
  2573. mtspr SPRN_VRSAVE,r7
  2574. mtlr r30
  2575. mr r4,r31
  2576. blr
  2577. #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  2578. /*
  2579. * Save transactional state and TM-related registers.
  2580. * Called with r9 pointing to the vcpu struct.
  2581. * This can modify all checkpointed registers, but
  2582. * restores r1, r2 and r9 (vcpu pointer) before exit.
  2583. */
  2584. kvmppc_save_tm:
  2585. mflr r0
  2586. std r0, PPC_LR_STKOFF(r1)
  2587. /* Turn on TM. */
  2588. mfmsr r8
  2589. li r0, 1
  2590. rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
  2591. mtmsrd r8
  2592. ld r5, VCPU_MSR(r9)
  2593. rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
  2594. beq 1f /* TM not active in guest. */
  2595. std r1, HSTATE_HOST_R1(r13)
  2596. li r3, TM_CAUSE_KVM_RESCHED
  2597. /* Clear the MSR RI since r1, r13 are all going to be foobar. */
  2598. li r5, 0
  2599. mtmsrd r5, 1
  2600. /* All GPRs are volatile at this point. */
  2601. TRECLAIM(R3)
  2602. /* Temporarily store r13 and r9 so we have some regs to play with */
  2603. SET_SCRATCH0(r13)
  2604. GET_PACA(r13)
  2605. std r9, PACATMSCRATCH(r13)
  2606. ld r9, HSTATE_KVM_VCPU(r13)
  2607. /* Get a few more GPRs free. */
  2608. std r29, VCPU_GPRS_TM(29)(r9)
  2609. std r30, VCPU_GPRS_TM(30)(r9)
  2610. std r31, VCPU_GPRS_TM(31)(r9)
2611. /* Save away PPR and DSCR soon so we don't run with user values. */
  2612. mfspr r31, SPRN_PPR
  2613. HMT_MEDIUM
  2614. mfspr r30, SPRN_DSCR
  2615. ld r29, HSTATE_DSCR(r13)
  2616. mtspr SPRN_DSCR, r29
  2617. /* Save all but r9, r13 & r29-r31 */
  2618. reg = 0
  2619. .rept 29
  2620. .if (reg != 9) && (reg != 13)
  2621. std reg, VCPU_GPRS_TM(reg)(r9)
  2622. .endif
  2623. reg = reg + 1
  2624. .endr
  2625. /* ... now save r13 */
  2626. GET_SCRATCH0(r4)
  2627. std r4, VCPU_GPRS_TM(13)(r9)
  2628. /* ... and save r9 */
  2629. ld r4, PACATMSCRATCH(r13)
  2630. std r4, VCPU_GPRS_TM(9)(r9)
  2631. /* Reload stack pointer and TOC. */
  2632. ld r1, HSTATE_HOST_R1(r13)
  2633. ld r2, PACATOC(r13)
  2634. /* Set MSR RI now we have r1 and r13 back. */
  2635. li r5, MSR_RI
  2636. mtmsrd r5, 1
2637. /* Save away checkpointed SPRs. */
  2638. std r31, VCPU_PPR_TM(r9)
  2639. std r30, VCPU_DSCR_TM(r9)
  2640. mflr r5
  2641. mfcr r6
  2642. mfctr r7
  2643. mfspr r8, SPRN_AMR
  2644. mfspr r10, SPRN_TAR
  2645. mfxer r11
  2646. std r5, VCPU_LR_TM(r9)
  2647. stw r6, VCPU_CR_TM(r9)
  2648. std r7, VCPU_CTR_TM(r9)
  2649. std r8, VCPU_AMR_TM(r9)
  2650. std r10, VCPU_TAR_TM(r9)
  2651. std r11, VCPU_XER_TM(r9)
  2652. /* Restore r12 as trap number. */
  2653. lwz r12, VCPU_TRAP(r9)
  2654. /* Save FP/VSX. */
  2655. addi r3, r9, VCPU_FPRS_TM
  2656. bl store_fp_state
  2657. addi r3, r9, VCPU_VRS_TM
  2658. bl store_vr_state
  2659. mfspr r6, SPRN_VRSAVE
  2660. stw r6, VCPU_VRSAVE_TM(r9)
  2661. 1:
  2662. /*
  2663. * We need to save these SPRs after the treclaim so that the software
  2664. * error code is recorded correctly in the TEXASR. Also the user may
  2665. * change these outside of a transaction, so they must always be
  2666. * context switched.
  2667. */
  2668. mfspr r5, SPRN_TFHAR
  2669. mfspr r6, SPRN_TFIAR
  2670. mfspr r7, SPRN_TEXASR
  2671. std r5, VCPU_TFHAR(r9)
  2672. std r6, VCPU_TFIAR(r9)
  2673. std r7, VCPU_TEXASR(r9)
  2674. ld r0, PPC_LR_STKOFF(r1)
  2675. mtlr r0
  2676. blr
  2677. /*
  2678. * Restore transactional state and TM-related registers.
  2679. * Called with r4 pointing to the vcpu struct.
  2680. * This potentially modifies all checkpointed registers.
  2681. * It restores r1, r2, r4 from the PACA.
  2682. */
  2683. kvmppc_restore_tm:
  2684. mflr r0
  2685. std r0, PPC_LR_STKOFF(r1)
  2686. /* Turn on TM/FP/VSX/VMX so we can restore them. */
  2687. mfmsr r5
  2688. li r6, MSR_TM >> 32
  2689. sldi r6, r6, 32
  2690. or r5, r5, r6
  2691. ori r5, r5, MSR_FP
  2692. oris r5, r5, (MSR_VEC | MSR_VSX)@h
  2693. mtmsrd r5
  2694. /*
  2695. * The user may change these outside of a transaction, so they must
  2696. * always be context switched.
  2697. */
  2698. ld r5, VCPU_TFHAR(r4)
  2699. ld r6, VCPU_TFIAR(r4)
  2700. ld r7, VCPU_TEXASR(r4)
  2701. mtspr SPRN_TFHAR, r5
  2702. mtspr SPRN_TFIAR, r6
  2703. mtspr SPRN_TEXASR, r7
  2704. ld r5, VCPU_MSR(r4)
  2705. rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
  2706. beqlr /* TM not active in guest */
  2707. std r1, HSTATE_HOST_R1(r13)
2708. /* Make sure the failure summary is set; otherwise we'll take a program
2709. * check when we trechkpt. It's possible that this might not have been
2710. * set by a kvmppc_set_one_reg() call, but we shouldn't let that crash
2711. * the host.
2712. */
  2713. oris r7, r7, (TEXASR_FS)@h
  2714. mtspr SPRN_TEXASR, r7
  2715. /*
  2716. * We need to load up the checkpointed state for the guest.
  2717. * We need to do this early as it will blow away any GPRs, VSRs and
  2718. * some SPRs.
  2719. */
  2720. mr r31, r4
  2721. addi r3, r31, VCPU_FPRS_TM
  2722. bl load_fp_state
  2723. addi r3, r31, VCPU_VRS_TM
  2724. bl load_vr_state
  2725. mr r4, r31
  2726. lwz r7, VCPU_VRSAVE_TM(r4)
  2727. mtspr SPRN_VRSAVE, r7
  2728. ld r5, VCPU_LR_TM(r4)
  2729. lwz r6, VCPU_CR_TM(r4)
  2730. ld r7, VCPU_CTR_TM(r4)
  2731. ld r8, VCPU_AMR_TM(r4)
  2732. ld r9, VCPU_TAR_TM(r4)
  2733. ld r10, VCPU_XER_TM(r4)
  2734. mtlr r5
  2735. mtcr r6
  2736. mtctr r7
  2737. mtspr SPRN_AMR, r8
  2738. mtspr SPRN_TAR, r9
  2739. mtxer r10
  2740. /*
  2741. * Load up PPR and DSCR values but don't put them in the actual SPRs
  2742. * till the last moment to avoid running with userspace PPR and DSCR for
  2743. * too long.
  2744. */
  2745. ld r29, VCPU_DSCR_TM(r4)
  2746. ld r30, VCPU_PPR_TM(r4)
  2747. std r2, PACATMSCRATCH(r13) /* Save TOC */
  2748. /* Clear the MSR RI since r1, r13 are all going to be foobar. */
  2749. li r5, 0
  2750. mtmsrd r5, 1
  2751. /* Load GPRs r0-r28 */
  2752. reg = 0
  2753. .rept 29
  2754. ld reg, VCPU_GPRS_TM(reg)(r31)
  2755. reg = reg + 1
  2756. .endr
  2757. mtspr SPRN_DSCR, r29
  2758. mtspr SPRN_PPR, r30
  2759. /* Load final GPRs */
  2760. ld 29, VCPU_GPRS_TM(29)(r31)
  2761. ld 30, VCPU_GPRS_TM(30)(r31)
  2762. ld 31, VCPU_GPRS_TM(31)(r31)
2763. /* TM checkpointed state is now set up. All GPRs are now volatile. */
  2764. TRECHKPT
  2765. /* Now let's get back the state we need. */
  2766. HMT_MEDIUM
  2767. GET_PACA(r13)
  2768. ld r29, HSTATE_DSCR(r13)
  2769. mtspr SPRN_DSCR, r29
  2770. ld r4, HSTATE_KVM_VCPU(r13)
  2771. ld r1, HSTATE_HOST_R1(r13)
  2772. ld r2, PACATMSCRATCH(r13)
  2773. /* Set the MSR RI since we have our registers back. */
  2774. li r5, MSR_RI
  2775. mtmsrd r5, 1
  2776. ld r0, PPC_LR_STKOFF(r1)
  2777. mtlr r0
  2778. blr
  2779. #endif
  2780. /*
  2781. * We come here if we get any exception or interrupt while we are
  2782. * executing host real mode code while in guest MMU context.
  2783. * For now just spin, but we should do something better.
  2784. */
  2785. kvmppc_bad_host_intr:
  2786. b .
  2787. /*
  2788. * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
  2789. * from VCPU_INTR_MSR and is modified based on the required TM state changes.
  2790. * r11 has the guest MSR value (in/out)
  2791. * r9 has a vcpu pointer (in)
  2792. * r0 is used as a scratch register
  2793. */
  2794. kvmppc_msr_interrupt:
  2795. rldicl r0, r11, 64 - MSR_TS_S_LG, 62
  2796. cmpwi r0, 2 /* Check if we are in transactional state.. */
  2797. ld r11, VCPU_INTR_MSR(r9)
  2798. bne 1f
  2799. /* ... if transactional, change to suspended */
  2800. li r0, 1
  2801. 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
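/* r11 now holds VCPU_INTR_MSR with the (possibly downgraded) TS field inserted */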
  2802. blr
  2803. /*
  2804. * This works around a hardware bug on POWER8E processors, where
  2805. * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
  2806. * performance monitor interrupt. Instead, when we need to have
  2807. * an interrupt pending, we have to arrange for a counter to overflow.
  2808. */
  2809. kvmppc_fix_pmao:
  2810. li r3, 0
  2811. mtspr SPRN_MMCR2, r3
  2812. lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
  2813. ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
  2814. mtspr SPRN_MMCR0, r3
  2815. lis r3, 0x7fff
  2816. ori r3, r3, 0xffff
  2817. mtspr SPRN_PMC6, r3
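/* PMC6 now sits one count below overflow, so the next increment raises
 * a performance monitor alert */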
  2818. isync
  2819. blr
  2820. #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
  2821. /*
  2822. * Start timing an activity
  2823. * r3 = pointer to time accumulation struct, r4 = vcpu
  2824. */
  2825. kvmhv_start_timing:
  2826. ld r5, HSTATE_KVM_VCORE(r13)
  2827. lbz r6, VCORE_IN_GUEST(r5)
  2828. cmpwi r6, 0
  2829. beq 5f /* if in guest, need to */
  2830. ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
  2831. 5: mftb r5
  2832. subf r5, r6, r5
  2833. std r3, VCPU_CUR_ACTIVITY(r4)
  2834. std r5, VCPU_ACTIVITY_START(r4)
  2835. blr
  2836. /*
  2837. * Accumulate time to one activity and start another.
  2838. * r3 = pointer to new time accumulation struct, r4 = vcpu
  2839. */
  2840. kvmhv_accumulate_time:
  2841. ld r5, HSTATE_KVM_VCORE(r13)
  2842. lbz r8, VCORE_IN_GUEST(r5)
  2843. cmpwi r8, 0
  2844. beq 4f /* if in guest, need to */
  2845. ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
  2846. 4: ld r5, VCPU_CUR_ACTIVITY(r4)
  2847. ld r6, VCPU_ACTIVITY_START(r4)
  2848. std r3, VCPU_CUR_ACTIVITY(r4)
  2849. mftb r7
  2850. subf r7, r8, r7
  2851. std r7, VCPU_ACTIVITY_START(r4)
  2852. cmpdi r5, 0
  2853. beqlr
  2854. subf r3, r6, r7
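/*
 * Update the accumulator under a seqcount-style protocol: bump
 * TAS_SEQCOUNT to an odd value, update total/min/max, then bump it
 * again so readers only trust an even count.
 */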
  2855. ld r8, TAS_SEQCOUNT(r5)
  2856. cmpdi r8, 0
  2857. addi r8, r8, 1
  2858. std r8, TAS_SEQCOUNT(r5)
  2859. lwsync
  2860. ld r7, TAS_TOTAL(r5)
  2861. add r7, r7, r3
  2862. std r7, TAS_TOTAL(r5)
  2863. ld r6, TAS_MIN(r5)
  2864. ld r7, TAS_MAX(r5)
  2865. beq 3f
  2866. cmpd r3, r6
  2867. bge 1f
  2868. 3: std r3, TAS_MIN(r5)
  2869. 1: cmpd r3, r7
  2870. ble 2f
  2871. std r3, TAS_MAX(r5)
  2872. 2: lwsync
  2873. addi r8, r8, 1
  2874. std r8, TAS_SEQCOUNT(r5)
  2875. blr
  2876. #endif