idle_book3s.S

/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _MMCR0	GPR0
#define _SDR1	GPR3
#define _PTCR	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _LPCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	((PSSCR_EC | PSSCR_ESL) >> 16)
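/*
 * On the shift above: PSSCR_EC (Exit Criterion) and PSSCR_ESL (Enable
 * State Loss) sit in the upper halfword of the low 32 bits of the PSSCR
 * (0x00100000 and 0x00200000 respectively in the kernel headers), so
 * shifting the combined mask right by 16 yields an immediate that
 * andis. -- which ANDs against the upper halfword of a register -- can
 * test directly; see the ESL=EC=0 check in power_enter_stop below.
 */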
	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in the interrupt stack frame.
 */
save_sprs_to_stack:
	/*
	 * Note: all registers, whether per-core, per-subcore or per-thread,
	 * are saved here, since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence SDR1 is not
	 * saved here.
	 */
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	mfspr	r3,SPRN_LPCR
	std	r3,_LPCR(r1)
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)
/*
 * On POWER9, there are idle states such as stop4, invoked via cpuidle,
 * that lose hypervisor resources. In such cases, we need to save
 * additional SPRs before entering those idle states so that they can
 * be restored to their previous values on wakeup from the idle state.
 *
 * On POWER8, the only such deep idle state is winkle, which is used
 * only in the context of CPU hotplug, where these additional SPRs are
 * reinitialized to a sane value. Hence there is no need to save/restore
 * these SPRs.
 */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
power9_save_additional_sprs:
	mfspr	r3, SPRN_PID
	mfspr	r4, SPRN_LDBAR
	std	r3, STOP_PID(r13)
	std	r4, STOP_LDBAR(r13)

	mfspr	r3, SPRN_FSCR
	mfspr	r4, SPRN_HFSCR
	std	r3, STOP_FSCR(r13)
	std	r4, STOP_HFSCR(r13)

	mfspr	r3, SPRN_MMCRA
	mfspr	r4, SPRN_MMCR0
	std	r3, STOP_MMCRA(r13)
	std	r4, _MMCR0(r1)

	mfspr	r3, SPRN_MMCR1
	mfspr	r4, SPRN_MMCR2
	std	r3, STOP_MMCR1(r13)
	std	r4, STOP_MMCR2(r13)
	blr
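/*
 * Note that most of the SPRs above go into per-thread STOP_* slots in
 * the PACA (addressed off r13), while MMCR0 reuses the _MMCR0 slot in
 * the interrupt stack frame (off r1), consistent with the
 * save_sprs_to_stack convention above. The restore path below mirrors
 * this split.
 */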
power9_restore_additional_sprs:
	ld	r3,_LPCR(r1)
	ld	r4, STOP_PID(r13)
	mtspr	SPRN_LPCR,r3
	mtspr	SPRN_PID, r4

	ld	r3, STOP_LDBAR(r13)
	ld	r4, STOP_FSCR(r13)
	mtspr	SPRN_LDBAR, r3
	mtspr	SPRN_FSCR, r4

	ld	r3, STOP_HFSCR(r13)
	ld	r4, STOP_MMCRA(r13)
	mtspr	SPRN_HFSCR, r3
	mtspr	SPRN_MMCRA, r4

	ld	r3, _MMCR0(r1)
	ld	r4, STOP_MMCR1(r13)
	mtspr	SPRN_MMCR0, r3
	mtspr	SPRN_MMCR1, r4

	ld	r3, STOP_MMCR2(r13)
	ld	r4, PACA_SPRG_VDSO(r13)
	mtspr	SPRN_MMCR2, r3
	mtspr	SPRN_SPRG3, r4
	blr
/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */
core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr
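/*
 * In outline (a C-like sketch in the style of the winkle pseudo-code
 * further down, not a literal translation):
 *
 *	do {
 *		while (*core_idle_state & LOCK_BIT)
 *			;			// spin cheaply at HMT_LOW
 *		r15 = lwarx(core_idle_state);	// reacquire the reservation
 *	} while (r15 & LOCK_BIT);		// lost a race: lock retaken
 *	// return with the reservation held and r15 loaded
 */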
/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE on POWER8
 *	   - requested PSSCR value on POWER9
 *
 * Address of the idle handler to branch to in r4
 * (entered in real mode on POWER8).
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/*
	 * NAP is a state loss, so we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mtctr	r4
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r5
	std	r5,_CCR(r1)
	std	r1,PACAR1(r13)

BEGIN_FTR_SECTION
	/*
	 * POWER9 does not require real mode to stop, and presently does not
	 * set hwthread_state for KVM (threads don't share MMU context), so
	 * we can remain in virtual mode for this.
	 */
	bctr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	/*
	 * POWER8
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	mtmsrd	r7,0
	bctr
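/*
 * The flow above, roughly (a sketch, not a literal translation):
 *
 *	create an interrupt frame; save LR as both _LINK and _NIP
 *	napstatelost = 0
 *	save r2, the NVGPRs and CR into the frame; PACAR1 = r1
 *	if (POWER9)
 *		goto handler;		// stays in virtual mode
 *	MSR = MSR_IDLE;			// POWER8: MMU off first
 *	goto handler;			// handler address was moved to CTR
 */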
/*
 * This is the sequence required to execute idle instructions, as
 * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
 */
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)			\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
236:	cmpd	cr0,r0,r0;					\
	bne	236b;						\
	IDLE_INST;
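/*
 * To spell out what the macro does: the store, ptesync and dependent
 * reload of the same stack location drain outstanding stores before
 * the idle instruction executes. The cmpd/bne pair can never actually
 * branch (r0 is compared with itself), so its apparent purpose is to
 * make the idle instruction depend on the reload having completed.
 */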
	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5			/* Add if winkle */
	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

	/*
	 * If cr0 is eq here, the current thread is the last thread of the
	 * core entering sleep. The last thread needs to execute the hardware
	 * bug workaround code if required by the platform.
	 * Make the workaround call unconditionally here. The branch below is
	 * patched out when the idle states are discovered if the platform
	 * does not require the workaround.
	 */
	.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter
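/*
 * In the call above, r3/r4 are the two arguments to
 * opal_config_cpu_idle_state(); judging by the exit path further down
 * (which calls it with r3=1, r4=0), r3 selects the fastsleep-workaround
 * configuration and r4=1/0 applies/undoes it.
 */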
enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
	/*
	 * Check if we are executing the lite variant with ESL=EC=0
	 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60	/* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
	PPC_STOP
	li	r3,0		/* Since we didn't lose state, return 0 */
	std	r3, PACA_REQ_PSSCR(r13)

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
	 * it can determine if the wakeup reason is an HMI in
	 * CHECK_HMI_INTERRUPT.
	 *
	 * However, when we wake up with ESL=0, SRR1 will not contain the
	 * wakeup reason, so there is no point setting r12 to SRR1.
	 *
	 * Further, we clear r12 here so that we don't accidentally enter the
	 * HMI path in pnv_wakeup_noloss() if the value of r12[42:45] ==
	 * WAKE_HMI.
	 */
	li	r12, 0
	b	pnv_wakeup_noloss
.Lhandle_esl_ec_set:
BEGIN_FTR_SECTION
	/*
	 * POWER9 DD2.0 or earlier can incorrectly set PMAO when waking up
	 * after a state-loss idle. Saving and restoring MMCR0 over idle is a
	 * workaround.
	 */
	mfspr	r4,SPRN_MMCR0
	std	r4,_MMCR0(r1)
END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)

	/*
	 * Check if the requested state is a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	PPC_STOP	/* Does not return (system reset interrupt) */
.Lhandle_deep_stop:
	/*
	 * Entering deep idle state.
	 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
	 * stack and enter stop
	 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc	r15,r15,r7	/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	PPC_STOP	/* Does not return (system reset interrupt) */
/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
_GLOBAL(power7_idle_insn)
	/* Hand off to the common powersave code with the ISA 2.07 handler */
	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
#define CHECK_HMI_INTERRUPT						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xf;  /* extract wake reason field (P8) */ \
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xe;  /* P7 wake reason field is 3 bits */ \
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne+	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
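/*
 * On the rlwinm above: rotating SRR1 (in r12) left by 14 bits brings
 * the wake reason field into the low nibble. The 0xf mask keeps all
 * four bits of the ISA 2.07 field (SRR1[42:45]); the 0xe mask keeps
 * the three-bit POWER7 field, left-aligned within the nibble so the
 * same cmpwi against 0xa matches "hypervisor maintenance" on both --
 * at least as far as the masks and the shared compare suggest.
 */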
/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired PSSCR register value.
 *
 * Offline (CPU unplug) case also must notify KVM that the CPU is
 * idle.
 */
_GLOBAL(power9_offline_stop)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/*
	 * Tell KVM we're entering idle.
	 * This does not have to be done in real mode because the P9 MMU
	 * is independent per-thread. Some steppings share radix/hash mode
	 * between threads, but in that case KVM has a barrier sync in real
	 * mode before and after switching between radix and hash.
	 */
	li	r4,KVM_HWTHREAD_IN_IDLE
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	/* fall through */

_GLOBAL(power9_idle_stop)
	std	r3, PACA_REQ_PSSCR(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	sync
	lwz	r5, PACA_DONT_STOP(r13)
	cmpwi	r5, 0
	bne	1f
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
#endif
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1:
	/*
	 * We get here when TM / thread reconfiguration bug workaround
	 * code wants to get the CPU into SMT4 mode, and therefore
	 * we are being asked not to stop.
	 */
	li	r3, 0
	std	r3, PACA_REQ_PSSCR(r13)
	blr	/* return 0 for wakeup cause / SRR1 value */
#endif
/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31	/* extract SRR1[46:47], state loss */
	cmpwi	cr3,r11,2

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into r12, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r12,r3,SRR1_WAKEMCE_RESVD@h

	b	pnv_powersave_wakeup
/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 * r12 - SRR1
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
	ld	r2, PACATOC(r13)

BEGIN_FTR_SECTION
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

	mr	r3,r12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	lbz	r0,HSTATE_HWTHREAD_STATE(r13)
	cmpwi	r0,KVM_HWTHREAD_IN_KERNEL
	beq	0f
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
0:	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss
/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return back to link.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * Workaround for POWER9: if we lost resources, the ERAT
	 * might have been mixed up and needs flushing. We also need
	 * to reload MMCR0 (see the workaround comment in power_enter_stop),
	 * and to set then clear bit 60 in MMCRA to ensure the PMU starts
	 * running.
	 */
	blt	cr3,1f
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
	ld	r1,PACAR1(r13)
	ld	r4,_MMCR0(r1)
	mtspr	SPRN_MMCR0,r4
END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
	mfspr	r4,SPRN_MMCRA
	ori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
	xori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
1:
	/*
	 * POWER ISA 3.0. Use the PSSCR to determine if we
	 * are waking up from a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	/*
	 * Bits 0-3 are the Power-Saving Level Status (PLS), which
	 * indicates the idle state we are waking up from.
	 */
	mfspr	r5, SPRN_PSSCR
	rldicl	r5,r5,4,60
	li	r0, 0		/* clear requested_psscr to say we're awake */
	std	r0, PACA_REQ_PSSCR(r13)
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss	/* returns to caller */

	blr	/* Waking up without hypervisor state loss. */
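/*
 * The rldicl above rotates the PSSCR left by 4 and clears all but the
 * low 4 bits, moving the PLS nibble (PSSCR bits 0:3, IBM numbering)
 * into the low bits of r5, so the check is effectively (a sketch):
 *
 *	if (PLS >= pnv_first_deep_stop_state)
 *		goto pnv_wakeup_tb_loss;  // deep state: restore hyp state
 *	return;				  // shallow state: nothing lost
 */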
/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or earlier.
	 * Check if we slept with sleep or winkle.
	 */
	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage cr3 shouldn't contain 'gt', since that
	 * would indicate waking with hypervisor state loss, which does not
	 * happen from nap.
	 */
	bgt	cr3,.		/* trap: spin here if it somehow does */

	blr	/* Waking up without hypervisor state loss */
/*
 * Called if waking up from an idle state which can cause either partial or
 * complete hypervisor state loss.
 * On POWER8, called if waking up from fastsleep or winkle.
 * On POWER9, called if waking up from a stop state >= pnv_first_deep_stop_state.
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)

	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is
	 * lost, but not guaranteed -- neither ISA207 nor ISA300 tests to
	 * reach here are the same as the test to restore NVGPRs:
	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
	 * and SRR1 test for restoring NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)
	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest.
	 * LR is required to return back to the reset vector after the
	 * hypervisor state restore is complete.
	 */
	mr	r19,r12
	mr	r18,r4
	mflr	r17
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * The lock bit is set in one of the two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wakeup path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring
	 *    context.
	 * In either case, loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */
BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core
	 * }
	 *
	 * sleep_wake()
	 * {
	 *	bool first_in_core, first_in_subcore;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *	bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 *
	 *	if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *		core_idle_state |= THREAD_WINKLE_BITS;
	 *	core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *	winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *	core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 */
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/*
	 * Shift the thread bit into the winkle mask, then test whether this
	 * thread's winkle bit is set, and remove it from the winkle bits.
	 */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7	/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7	/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock
first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore.
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:
	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch instruction below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching is performed during the
	 * discovery of idle states.
	 */
	.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit
timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least
	 * partial hypervisor state loss, to determine if a timebase resync
	 * is needed.
	 */
	ble	cr3,.Ltb_resynced
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep (POWER8), per-core state
	 * is not lost, skip to clear_lock.
	 */
.Ltb_resynced:
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up, and it is waking up with
	 * complete hypervisor state loss. Restore per-core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
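/*
 * A C-like sketch of the .rept loop above (field names per the
 * slb_shadow save area; approximate):
 *
 *	for (i = 0; i < SLB_NUM_BOLTED; i++) {
 *		esid = be64_to_cpu(shadow->save_area[i].esid);
 *		vsid = be64_to_cpu(shadow->save_area[i].vsid);
 *		if (esid & SLB_ESID_V)
 *			slbmte(vsid, esid);	// skip invalid entries
 *	}
 */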
no_segments:
	/* Restore per-thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

	/*
	 * On POWER9, we can come here on wakeup from a cpuidle stop state.
	 * Hence restore the additional SPRs to their saved values.
	 *
	 * On POWER8, we come here only on winkle. Since winkle is used
	 * only in the case of CPU hotplug, we don't need to restore
	 * the additional SPRs.
	 */
BEGIN_FTR_SECTION
	bl	power9_restore_additional_sprs
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:

	mr	r12,r19
	mtlr	r17
	blr		/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync
/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r4,PACAKMSR(r13)
	ld	r5,_LINK(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r4,PACAKMSR(r13)
	ld	r5,_NIP(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr