idle_book3s.S

/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _MMCR0	GPR0
#define _SDR1	GPR3
#define _PTCR	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _LPCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	((PSSCR_EC | PSSCR_ESL) >> 16)

	.text
/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in the interrupt stack frame.
 */
save_sprs_to_stack:
	/*
	 * Note that all registers (per-core, per-subcore or per-thread) are
	 * saved here, since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not saving
	 * SDR1 here.
	 */
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	mfspr	r3,SPRN_LPCR
	std	r3,_LPCR(r1)
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)
/*
 * On POWER9, there are idle states such as stop4, invoked via cpuidle,
 * that lose hypervisor resources. In such cases, we need to save
 * additional SPRs before entering those idle states so that they can
 * be restored to their old values on wakeup from the idle state.
 *
 * On POWER8, the only such deep idle state is winkle, which is used
 * only in the context of CPU hotplug, where these additional SPRs are
 * reinitialized to a sane value. Hence there is no need to save/restore
 * these SPRs.
 */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

power9_save_additional_sprs:
	mfspr	r3, SPRN_PID
	mfspr	r4, SPRN_LDBAR
	std	r3, STOP_PID(r13)
	std	r4, STOP_LDBAR(r13)

	mfspr	r3, SPRN_FSCR
	mfspr	r4, SPRN_HFSCR
	std	r3, STOP_FSCR(r13)
	std	r4, STOP_HFSCR(r13)

	mfspr	r3, SPRN_MMCRA
	mfspr	r4, SPRN_MMCR0
	std	r3, STOP_MMCRA(r13)
	std	r4, _MMCR0(r1)

	mfspr	r3, SPRN_MMCR1
	mfspr	r4, SPRN_MMCR2
	std	r3, STOP_MMCR1(r13)
	std	r4, STOP_MMCR2(r13)
	blr

power9_restore_additional_sprs:
	ld	r3,_LPCR(r1)
	ld	r4, STOP_PID(r13)
	mtspr	SPRN_LPCR,r3
	mtspr	SPRN_PID, r4

	ld	r3, STOP_LDBAR(r13)
	ld	r4, STOP_FSCR(r13)
	mtspr	SPRN_LDBAR, r3
	mtspr	SPRN_FSCR, r4

	ld	r3, STOP_HFSCR(r13)
	ld	r4, STOP_MMCRA(r13)
	mtspr	SPRN_HFSCR, r3
	mtspr	SPRN_MMCRA, r4

	ld	r3, _MMCR0(r1)
	ld	r4, STOP_MMCR1(r13)
	mtspr	SPRN_MMCR0, r3
	mtspr	SPRN_MMCR1, r4

	ld	r3, STOP_MMCR2(r13)
	mtspr	SPRN_MMCR2, r3
	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr
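
/*
 * Illustrative C-like sketch of core_idle_lock_held above (pseudo-code
 * only, not actual kernel code; "core_idle_state" stands for the word at
 * PACA_CORE_IDLE_STATE_PTR):
 *
 *	do {
 *		while (*core_idle_state & PNV_CORE_IDLE_LOCK_BIT)
 *			;				// spin at HMT_LOW priority
 *		state = load_reserve(core_idle_state);	// lwarx
 *	} while (state & PNV_CORE_IDLE_LOCK_BIT);
 *	return state;					// reservation held by caller
 */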
/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested PSSCR value in POWER9
 *
 * Address of idle handler to branch to in realmode in r4
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mtctr	r4
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r5
	std	r5,_CCR(r1)
	std	r1,PACAR1(r13)

BEGIN_FTR_SECTION
	/*
	 * POWER9 does not require real mode to stop, and presently does not
	 * set hwthread_state for KVM (threads don't share MMU context), so
	 * we can remain in virtual mode for this.
	 */
	bctr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	/*
	 * POWER8
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	mtmsrd	r7,0
	bctr
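
/*
 * Rough C-like sketch of the flow above (illustrative only, not real
 * kernel code):
 *
 *	pnv_powersave_common(state, handler)
 *	{
 *		push_interrupt_frame();		// LR, NIP, CR, r2, NVGPRs
 *		paca->napstatelost = 0;
 *		paca->r1 = stack_pointer;	// so wakeup can find the frame
 *		if (!cpu_has_feature(CPU_FTR_ARCH_300))
 *			switch_to_real_mode();	// MSR_IDLE, POWER8 only
 *		handler(state);			// does not return normally
 *	}
 */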
/*
 * This is the sequence required to execute idle instructions, as
 * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
 */
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)			\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
236:	cmpd	cr0,r0,r0;					\
	bne	236b;						\
	IDLE_INST;

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5			/* Add if winkle */
	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then the current thread is the last thread of the core
 * entering sleep. The last thread needs to execute the hardware bug
 * workaround code if required by the platform.
 * Make the workaround call unconditionally here. The branch below is
 * patched out during idle-state discovery if the platform does not
 * require the workaround.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
/*
 * Check if we are executing the lite variant with ESL=EC=0
 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60		/* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
	PPC_STOP
	li	r3,0			/* Since we didn't lose state, return 0 */
	std	r3, PACA_REQ_PSSCR(r13)

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
	 * it can determine if the wakeup reason is an HMI in
	 * CHECK_HMI_INTERRUPT.
	 *
	 * However, when we wake up with ESL=0, SRR1 will not contain the
	 * wakeup reason, so there is no point setting r12 to SRR1.
	 *
	 * Further, we clear r12 here so that we don't accidentally enter the
	 * HMI path in pnv_wakeup_noloss() if r12[42:45] == WAKE_HMI.
	 */
	li	r12, 0
	b	pnv_wakeup_noloss
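
/*
 * Illustrative C-like sketch of power_enter_stop (pseudo-code only, not
 * real kernel code; psscr_rl() is a made-up helper for the RL field):
 *
 *	power_enter_stop(psscr)
 *	{
 *		if ((psscr & (PSSCR_EC | PSSCR_ESL)) == 0) {
 *			stop();			// lite stop: no state loss,
 *			return 0;		// resumes at the next instruction
 *		}
 *		// ESL/EC set: wakeup will come via the system reset vector
 *		if (psscr_rl(psscr) >= pnv_first_deep_stop_state)
 *			clear_thread_bit_and_save_sprs();
 *		stop();				// does not return here
 *	}
 */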
.Lhandle_esl_ec_set:
BEGIN_FTR_SECTION
	/*
	 * POWER9 DD2.0 or earlier can incorrectly set PMAO when waking up
	 * after a state-loss idle. Saving and restoring MMCR0 over idle is a
	 * workaround.
	 */
	mfspr	r4,SPRN_MMCR0
	std	r4,_MMCR0(r1)
END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)

	/*
	 * Check if the requested state is a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	PPC_STOP	/* Does not return (system reset interrupt) */

.Lhandle_deep_stop:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	PPC_STOP	/* Does not return (system reset interrupt) */
/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
_GLOBAL(power7_idle_insn)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common

#define CHECK_HMI_INTERRUPT						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xf;  /* extract wake reason field (P8) */ \
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xe;  /* P7 wake reason field is 3 bits */ \
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne+	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired PSSCR register value.
 *
 * Offline (CPU unplug) case also must notify KVM that the CPU is
 * idle.
 */
_GLOBAL(power9_offline_stop)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/*
	 * Tell KVM we're entering idle.
	 * This does not have to be done in real mode because the P9 MMU
	 * is independent per-thread. Some steppings share radix/hash mode
	 * between threads, but in that case KVM has a barrier sync in real
	 * mode before and after switching between radix and hash.
	 */
	li	r4,KVM_HWTHREAD_IN_IDLE
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	/* fall through */

_GLOBAL(power9_idle_stop)
	std	r3, PACA_REQ_PSSCR(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	sync
	lwz	r5, PACA_DONT_STOP(r13)
	cmpwi	r5, 0
	bne	1f
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
#endif
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1:
	/*
	 * We get here when TM / thread reconfiguration bug workaround
	 * code wants to get the CPU into SMT4 mode, and therefore
	 * we are being asked not to stop.
	 */
	li	r3, 0
	std	r3, PACA_REQ_PSSCR(r13)
	blr		/* return 0 for wakeup cause / SRR1 value */
#endif
/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31
	cmpwi	cr3,r11,2

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into r12, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r12,r3,SRR1_WAKEMCE_RESVD@h

	b	pnv_powersave_wakeup
/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 * r12 - SRR1
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
	ld	r2, PACATOC(r13)

BEGIN_FTR_SECTION
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

	mr	r3,r12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	lbz	r0,HSTATE_HWTHREAD_STATE(r13)
	cmpwi	r0,KVM_HWTHREAD_IN_KERNEL
	beq	0f
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
0:	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss
/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return back to link.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * Workaround for POWER9: if we lost resources, the ERAT
	 * might have been mixed up and needs flushing. We also need
	 * to reload MMCR0 (see comment above). We also need to set
	 * then clear bit 60 in MMCRA to ensure the PMU starts running.
	 */
	blt	cr3,1f
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
	ld	r1,PACAR1(r13)
	ld	r4,_MMCR0(r1)
	mtspr	SPRN_MMCR0,r4
END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
	mfspr	r4,SPRN_MMCRA
	ori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
	xori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
1:
	/*
	 * POWER ISA 3.0. Use PSSCR to determine if we
	 * are waking up from deep idle state
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	/*
	 * Bits 0-3 correspond to the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from.
	 */
	mfspr	r5, SPRN_PSSCR
	rldicl	r5,r5,4,60
	li	r0, 0		/* clear requested_psscr to say we're awake */
	std	r0, PACA_REQ_PSSCR(r13)
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss	/* returns to caller */

	blr	/* Waking up without hypervisor state loss. */
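
/*
 * Illustrative C-like sketch of the check above (pseudo-code only, not
 * real kernel code):
 *
 *	pls = (mfspr(SPRN_PSSCR) >> 60) & 0xf;	// Power-Saving Level Status
 *	paca->requested_psscr = 0;		// we are awake now
 *	if (pls >= pnv_first_deep_stop_state)
 *		return pnv_wakeup_tb_loss();	// hypervisor state was lost
 *	return;					// shallow stop, nothing lost
 */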
/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or less.
	 * Check if we slept with sleep or winkle.
	 */
	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage cr3 shouldn't contain 'gt', since that
	 * would indicate we are waking up with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Waking up without hypervisor state loss */
/*
 * Called if waking up from an idle state which can cause either partial or
 * complete hypervisor state loss.
 * In POWER8, called if waking up from fastsleep or winkle.
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state.
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is
	 * lost, but not guaranteed -- neither the ISA207 nor the ISA300 tests
	 * that bring us here are the same as the test used to decide whether
	 * to restore NVGPRs (PACA_THREAD_IDLE_STATE for ISA207, PSSCR for
	 * ISA300, and SRR1 for the NVGPR restore itself).
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)
	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mr	r19,r12
	mr	r18,r4
	mflr	r17
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * Lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase, or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync
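
	/*
	 * Illustrative C-like sketch of the lock acquisition above
	 * (pseudo-code only, not real kernel code):
	 *
	 *	do {
	 *		state = load_reserve(core_idle_state);	// lwarx
	 *		if (state & PNV_CORE_IDLE_LOCK_BIT)
	 *			state = core_idle_lock_held();	// spin, re-reserve
	 *	} while (!store_conditional(core_idle_state,	// stwcx.
	 *				    state | PNV_CORE_IDLE_LOCK_BIT));
	 *	// isync: no later access executes before the lock is held
	 */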
	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core
	 * }
	 *
	 * sleep_wake()
	 * {
	 *	bool first_in_core, first_in_subcore;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *	bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 *
	 *	if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *		core_idle_state |= THREAD_WINKLE_BITS;
	 *	core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *	winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *	core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 */
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/* Shift thread bit to winkle mask, then test if this thread is set,
	 * and remove it from the winkle bits */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1	/* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0		/* Check if first in subcore */

	or	r15,r15,r7	/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7	/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock
first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore.
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch below will be patched out
	 * if the platform does not have fastsleep or does not require the
	 * workaround. Patching will be performed during the discovery of
	 * idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least partial
	 * hypervisor state loss, to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,.Ltb_resynced
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep (POWER8), per core state
	 * is not lost, skip to clear_lock.
	 */
.Ltb_resynced:
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

/*
 * On POWER9, we can come here on wakeup from a cpuidle stop state.
 * Hence restore the additional SPRs to their saved values.
 *
 * On POWER8, we come here only on winkle. Since winkle is used
 * only in the case of CPU hotplug, we don't need to restore
 * the additional SPRs.
 */
BEGIN_FTR_SECTION
	bl	power9_restore_additional_sprs
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:

	mr	r12,r19
	mtlr	r17
	blr		/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync
/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r4,PACAKMSR(r13)
	ld	r5,_LINK(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r4,PACAKMSR(r13)
	ld	r5,_NIP(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr