idle_book3s.S
/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _MMCR0	GPR0
#define _SDR1	GPR3
#define _PTCR	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _LPCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	(PSSCR_EC | PSSCR_ESL) >> 16
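
/*
 * PSSCR_EC and PSSCR_ESL sit above bit 16 of the PSSCR image, and the
 * andis. instruction ANDs a register with (immediate << 16), so the mask
 * is pre-shifted right by 16 to line up with the immediate field. An
 * illustrative C sketch of the same test, not kernel code, assuming the
 * usual PSSCR_EC/PSSCR_ESL bit definitions:
 *
 *	static bool psscr_esl_ec_set(unsigned long psscr)
 *	{
 *		return (psscr & (PSSCR_EC | PSSCR_ESL)) != 0;
 *	}
 */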
	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note: all registers, whether per-core, per-subcore or per-thread,
	 * are saved here, since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	/*
	 * Note - SDR1 is dropped in Power ISA v3, hence it is not
	 * saved/restored here.
	 */
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	mfspr	r3,SPRN_LPCR
	std	r3,_LPCR(r1)
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

/*
 * On POWER9, there are idle states such as stop4, invoked via cpuidle,
 * that lose hypervisor resources. In such cases, we need to save
 * additional SPRs before entering those idle states so that they can
 * be restored to their previous values on wakeup from the idle state.
 *
 * On POWER8, the only such deep idle state is winkle, which is used
 * only in the context of CPU-Hotplug, where these additional SPRs are
 * reinitialized to a sane value. Hence there is no need to save/restore
 * these SPRs.
 */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
power9_save_additional_sprs:
	mfspr	r3, SPRN_PID
	mfspr	r4, SPRN_LDBAR
	std	r3, STOP_PID(r13)
	std	r4, STOP_LDBAR(r13)

	mfspr	r3, SPRN_FSCR
	mfspr	r4, SPRN_HFSCR
	std	r3, STOP_FSCR(r13)
	std	r4, STOP_HFSCR(r13)

	mfspr	r3, SPRN_MMCRA
	mfspr	r4, SPRN_MMCR0
	std	r3, STOP_MMCRA(r13)
	std	r4, _MMCR0(r1)

	mfspr	r3, SPRN_MMCR1
	mfspr	r4, SPRN_MMCR2
	std	r3, STOP_MMCR1(r13)
	std	r4, STOP_MMCR2(r13)
	blr

power9_restore_additional_sprs:
	ld	r3,_LPCR(r1)
	ld	r4, STOP_PID(r13)
	mtspr	SPRN_LPCR,r3
	mtspr	SPRN_PID, r4

	ld	r3, STOP_LDBAR(r13)
	ld	r4, STOP_FSCR(r13)
	mtspr	SPRN_LDBAR, r3
	mtspr	SPRN_FSCR, r4

	ld	r3, STOP_HFSCR(r13)
	ld	r4, STOP_MMCRA(r13)
	mtspr	SPRN_HFSCR, r3
	mtspr	SPRN_MMCRA, r4

	ld	r3, _MMCR0(r1)
	ld	r4, STOP_MMCR1(r13)
	mtspr	SPRN_MMCR0, r3
	mtspr	SPRN_MMCR1, r4

	ld	r3, STOP_MMCR2(r13)
	ld	r4, PACA_SPRG_VDSO(r13)
	mtspr	SPRN_MMCR2, r3
	mtspr	SPRN_SPRG3, r4
	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9 - used as a temporary variable
 */
core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr
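
/*
 * The protocol above, as an illustrative C sketch (not kernel code):
 * spin at low thread priority while the lock bit is set, then redo the
 * load-reserve at normal priority so the caller's store-conditional
 * operates on fresh state. If the lock was re-taken in between, spin
 * again.
 *
 *	static unsigned int core_idle_lock_held(unsigned int *state)
 *	{
 *		unsigned int s;
 *
 *		do {
 *			while (READ_ONCE(*state) & PNV_CORE_IDLE_LOCK_BIT)
 *				;
 *			s = load_reserve(state);
 *		} while (s & PNV_CORE_IDLE_LOCK_BIT);
 *		return s;
 *	}
 *
 * where load_reserve() stands in for the lwarx that the caller will
 * pair with stwcx.
 */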
/* Reuse an unused pt_regs slot for IAMR */
#define PNV_POWERSAVE_IAMR	_DAR

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested PSSCR value in POWER9
 *
 * Address of idle handler to branch to in realmode in r4
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/*
	 * NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mtctr	r4
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)

BEGIN_FTR_SECTION
	mfspr	r5, SPRN_IAMR
	std	r5, PNV_POWERSAVE_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	mfcr	r5
	std	r5,_CCR(r1)
	std	r1,PACAR1(r13)

BEGIN_FTR_SECTION
	/*
	 * POWER9 does not require real mode to stop, and presently does not
	 * set hwthread_state for KVM (threads don't share MMU context), so
	 * we can remain in virtual mode for this.
	 */
	bctr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	/*
	 * POWER8
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	mtmsrd	r7,0
	bctr

/*
 * This is the sequence required to execute idle instructions, as
 * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
 */
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)			\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
236:	cmpd	cr0,r0,r0;					\
	bne	236b;						\
	IDLE_INST;
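
/*
 * In rough C terms the macro above is a store, a ptesync, and a
 * load-compare loop, followed by the idle instruction. The compare can
 * never differ (r0 is compared with itself); the loop exists only to
 * force the load to complete before the idle instruction issues. An
 * illustrative sketch, not kernel code, where idle_inst() stands in
 * for nap/sleep/winkle:
 *
 *	scratch[0] = r0;
 *	ptesync();
 *	while (scratch[0] != scratch[0])
 *		;			// never taken, orders the load
 *	idle_inst();			// does not return
 */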
	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5			/* Add if winkle */
	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then current thread is the last thread of the core entering
 * sleep. Last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The below branch call is
 * patched out when the idle states are discovered if the platform does not
 * require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
	/*
	 * Check if we are executing the lite variant with ESL=EC=0
	 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60	/* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
	PPC_STOP
	li	r3,0	/* Since we didn't lose state, return 0 */
	std	r3, PACA_REQ_PSSCR(r13)

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
	 * it can determine if the wakeup reason is an HMI in
	 * CHECK_HMI_INTERRUPT.
	 *
	 * However, when we wake up with ESL=0, SRR1 will not contain the
	 * wakeup reason, so there is no point setting r12 to SRR1.
	 *
	 * Further, we clear r12 here, so that we don't accidentally enter the
	 * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI.
	 */
	li	r12, 0
	b	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
BEGIN_FTR_SECTION
	/*
	 * POWER9 DD2.0 or earlier can incorrectly set PMAO when waking up
	 * after a state-loss idle. Saving and restoring MMCR0 over idle is a
	 * workaround.
	 */
	mfspr	r4,SPRN_MMCR0
	std	r4,_MMCR0(r1)
END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)

	/*
	 * Check if the requested state is a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	PPC_STOP	/* Does not return (system reset interrupt) */

.Lhandle_deep_stop:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	PPC_STOP	/* Does not return (system reset interrupt) */

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
_GLOBAL(power7_idle_insn)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common

#define CHECK_HMI_INTERRUPT						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xf;  /* extract wake reason field (P8) */\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xe;  /* P7 wake reason field is 3 bits */\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne+	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
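
/*
 * The rlwinm in the macro above extracts SRR1[42:45], the wake reason
 * field, which in C terms is (srr1 >> 18) & 0xf on POWER8 (0xe masks
 * the 3-bit POWER7 field), and 0xa is the Hypervisor Maintenance
 * Interrupt encoding that the cmpwi tests for. An illustrative sketch,
 * not kernel code:
 *
 *	unsigned long wake_reason = (srr1 >> 18) & 0xf;
 *
 *	if (wake_reason == 0xa)			// HMI wakeup
 *		hmi_exception_realmode(NULL);	// NULL matches li r3,0
 */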
/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired PSSCR register value.
 *
 * Offline (CPU unplug) case also must notify KVM that the CPU is
 * idle.
 */
_GLOBAL(power9_offline_stop)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/*
	 * Tell KVM we're entering idle.
	 * This does not have to be done in real mode because the P9 MMU
	 * is independent per-thread. Some steppings share radix/hash mode
	 * between threads, but in that case KVM has a barrier sync in real
	 * mode before and after switching between radix and hash.
	 */
	li	r4,KVM_HWTHREAD_IN_IDLE
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	/* fall through */

_GLOBAL(power9_idle_stop)
	std	r3, PACA_REQ_PSSCR(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	sync
	lwz	r5, PACA_DONT_STOP(r13)
	cmpwi	r5, 0
	bne	1f
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
#endif
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1:
	/*
	 * We get here when TM / thread reconfiguration bug workaround
	 * code wants to get the CPU into SMT4 mode, and therefore
	 * we are being asked not to stop.
	 */
	li	r3, 0
	std	r3, PACA_REQ_PSSCR(r13)
	blr	/* return 0 for wakeup cause / SRR1 value */
#endif
/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31
	cmpwi	cr3,r11,2

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into r12, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r12,r3,SRR1_WAKEMCE_RESVD@h

	b	pnv_powersave_wakeup

/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 * r12 - SRR1
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
	ld	r2, PACATOC(r13)

BEGIN_FTR_SECTION
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

	mr	r3,r12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	lbz	r0,HSTATE_HWTHREAD_STATE(r13)
	cmpwi	r0,KVM_HWTHREAD_IN_KERNEL
	beq	0f
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
0:	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss

/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return back to link.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * Workaround for POWER9: if we lost resources, the ERAT
	 * might have been mixed up and needs flushing. We also need
	 * to reload MMCR0 (see comment above). We also need to set
	 * then clear bit 60 in MMCRA to ensure the PMU starts running.
	 */
	blt	cr3,1f
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
	ld	r1,PACAR1(r13)
	ld	r4,_MMCR0(r1)
	mtspr	SPRN_MMCR0,r4
END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
	mfspr	r4,SPRN_MMCRA
	ori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
	xori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
1:
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from deep idle state
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	/*
	 * Bits 0:3 of PSSCR hold the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from
	 */
	mfspr	r5, SPRN_PSSCR
	rldicl	r5,r5,4,60
	li	r0, 0	/* clear requested_psscr to say we're awake */
	std	r0, PACA_REQ_PSSCR(r13)
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss	/* returns to caller */

	blr	/* Waking up without hypervisor state loss. */
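
/*
 * The rldicl above extracts PSSCR[0:3], the Power-Saving Level Status,
 * i.e. pls = psscr >> 60 in C terms, and compares it against the first
 * deep stop level. An illustrative sketch of the test, not kernel code:
 *
 *	unsigned long pls = mfspr(SPRN_PSSCR) >> 60;
 *
 *	if (pls >= pnv_first_deep_stop_state)
 *		return pnv_wakeup_tb_loss();	// hypervisor state lost
 *	return 0;				// no hypervisor state loss
 */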
/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or less.
	 * Check if we slept with sleep or winkle.
	 */
	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt' since that
	 * indicates we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Waking up without hypervisor state loss */

/*
 * Called if waking up from idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is
	 * lost, but not guaranteed -- neither ISA207 nor ISA300 tests to
	 * reach here are the same as the test to restore NVGPRS:
	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
	 * and SRR1 test for restoring NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mr	r19,r12
	mr	r18,r4
	mflr	r17
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * The lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, or resyncing the timebase, or restoring
	 *    context.
	 * In either case loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wakeup in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core
	 * }
	 *
	 * sleep_wake()
	 * {
	 *	bool first_in_core, first_in_subcore;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *	bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 *
	 *	if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *		core_idle_state |= THREAD_WINKLE_BITS;
	 *	core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *	winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *	core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 */
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/*
	 * Shift thread bit to winkle mask, then test if this thread is set,
	 * and remove it from the winkle bits
	 */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7		/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7		/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:
	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The below branch instruction will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3 which indicates that we are waking up with at least partial
	 * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,.Ltb_resynced
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep (POWER8), per core state
	 * is not lost, skip to clear_lock.
	 */
.Ltb_resynced:
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
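
/*
 * The .rept loop above walks the SLB shadow save area: each 16-byte
 * slot holds a big-endian ESID/VSID pair, and only entries with
 * SLB_ESID_V set are reinstalled. An illustrative C sketch, not kernel
 * code, assuming the save_area layout of the kernel's struct slb_shadow:
 *
 *	for (i = 0; i < SLB_NUM_BOLTED; i++) {
 *		unsigned long esid = be64_to_cpu(p->save_area[i].esid);
 *		unsigned long vsid = be64_to_cpu(p->save_area[i].vsid);
 *
 *		if (esid & SLB_ESID_V)
 *			asm volatile("slbmte %0,%1" : : "r"(vsid), "r"(esid));
 *	}
 */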
no_segments:
	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

/*
 * On POWER9, we can come here on wakeup from a cpuidle stop state.
 * Hence restore the additional SPRs to the saved value.
 *
 * On POWER8, we come here only on winkle. Since winkle is used
 * only in the case of CPU-Hotplug, we don't need to restore
 * the additional SPRs.
 */
BEGIN_FTR_SECTION
	bl	power9_restore_additional_sprs
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

hypervisor_state_restored:
	mr	r12,r19
	mtlr	r17
	blr	/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync
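
/*
 * Both fastsleep workaround paths call opal_config_cpu_idle_state()
 * with r3 selecting the fastsleep workaround and r4 set to 1 to apply
 * it (idle entry) or 0 to undo it (wakeup). An illustrative C sketch,
 * not kernel code; the constant name is an assumption:
 *
 *	opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP, 1);  // entry
 *	opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP, 0);  // exit
 */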
/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)

BEGIN_FTR_SECTION
	/* IAMR was saved in pnv_powersave_common() */
	ld	r5, PNV_POWERSAVE_IAMR(r1)
	mtspr	SPRN_IAMR, r5
	/*
	 * We don't need an isync here because the upcoming mtmsrd is
	 * execution synchronizing.
	 */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	ld	r4,PACAKMSR(r13)
	ld	r5,_LINK(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r4,PACAKMSR(r13)
	ld	r5,_NIP(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr