/*
 * This file contains the power_save function for Power7 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/mmu-hash64.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11

/* Idle state entry routines */

#define	IDLE_STATE_ENTER_SEQ(IDLE_INST)				\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
1:	cmp	cr0,r0,r0;					\
	bne	1b;						\
	IDLE_INST;						\
	b	.
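
/*
 * IDLE_INST is one of PPC_NAP, PPC_SLEEP or PPC_WINKLE.  Execution is not
 * expected to continue past it; the trailing "b ." catches an unexpected
 * fall-through from the power-saving instruction.
 */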

	.text

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE
 *
 * Pass in r4 whether PACAIRQHAPPENED should be checked:
 *	0 - don't check
 *	1 - check
 */
_GLOBAL(power7_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/*
	 * Hard disable interrupts: the rldicl rotates the MSR left by 48 so
	 * that MSR_EE becomes the most significant bit and is cleared by the
	 * mask, and the rotldi completes the 64-bit rotation to restore the
	 * original layout.
	 */
	mfmsr	r9
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
	beq	1f
	cmpwi	cr0,r4,0
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/* We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6
	LOAD_REG_ADDR(r7, power7_enter_nap_mode)
	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r7
	mtspr	SPRN_SRR1, r5
	rfid

	.globl power7_enter_nap_mode
power7_enter_nap_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're napping */
	li	r4,KVM_HWTHREAD_IN_NAP
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
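	/*
	 * Atomically clear this thread's bit in core_idle_state using a
	 * lwarx/stwcx. loop.  If another thread holds the lock bit, spin in
	 * core_idle_lock_held (which reloads r15 and the reservation) first.
	 */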
lwarx_loop1:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held

	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then current thread is the last thread of the core entering
 * sleep. Last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The below branch call is
 * patched out when the idle states are discovered if the platform does not
 * require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	li	r0,OPAL_CONFIG_CPU_IDLE_STATE
	bl	opal_call_realmode

	/* Clear Lock bit */
	li	r0,0
	lwsync
	stw	r0,0(r14)
	b	common_enter

enter_winkle:
	/*
	 * Note that all per-core, per-subcore and per-thread registers are
	 * saved here, since any thread in the core might wake up first.
	 */
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)
	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
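
/*
 * power7_idle: generic idle entry point.  If nap has not been enabled
 * (powersave_nap is zero) just return; otherwise fall through to
 * power7_nap with r3 = 1 so that PACAIRQHAPPENED is checked.
 */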
_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr
	li	r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr	r4,r3
	li	r3,PNV_THREAD_NAP
	b	power7_powersave_common
	/* No return */

_GLOBAL(power7_sleep)
	li	r3,PNV_THREAD_SLEEP
	li	r4,1
	b	power7_powersave_common
	/* No return */

_GLOBAL(power7_winkle)
	li	r3,3			/* PNV_THREAD_WINKLE */
	li	r4,1
	b	power7_powersave_common
	/* No return */
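
/*
 * Check whether the wakeup was caused by a Hypervisor Maintenance
 * Interrupt: extract the SRR1 wake reason field (its width differs between
 * POWER7 and POWER8, hence the feature section) and, if it indicates HMI,
 * call OPAL_HANDLE_HMI in real mode, preserving r3 in ORIG_GPR3.
 */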
#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r0,OPAL_HANDLE_HMI;	/* Pass opal token argument*/	\
	bl	opal_call_realmode;					\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
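
/*
 * Entered on wakeup from a state in which hypervisor resources (at least
 * the timebase) may have been lost, i.e. fastsleep or winkle.  cr3 and cr4
 * are expected to have been set up by the wakeup code in exceptions-64s.S
 * (see the register usage comment below).
 */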
_GLOBAL(power7_wakeup_tb_loss)
	ld	r2,PACATOC(r13);
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack
	 * and they are restored before switching to the process context. Hence
	 * until they are restored, they are free to be used.
	 *
	 * Save SRR1 in a NVGPR as it might be clobbered in opal_call_realmode
	 * (called in CHECK_HMI_INTERRUPT). SRR1 is required to determine the
	 * wakeup reason if we branch to kvm_start_guest.
	 */
	mfspr	r16,SPRN_SRR1
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	/*
	 * Lock bit is set in one of the 2 cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
	bnel	core_idle_lock_held

	cmpwi	cr2,r15,0
	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	cr1,r4,0	/* Check if first in subcore */

	/*
	 * At this stage
	 * cr1 - 0b0100 if first thread to wakeup in subcore
	 * cr2 - 0b0100 if first thread to wakeup in core
	 * cr3 - 0b0010 if waking up from sleep or winkle
	 * cr4 - 0b0100 if waking up from winkle
	 */
	or	r15,r15,r7		/* Set thread bit */

	beq	cr1,first_thread_in_subcore

	/* Not first thread in subcore to wake up */
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync
	b	common_exit

first_thread_in_subcore:
	/* First thread in subcore to wakeup */
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync

	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	bne	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from fastsleep. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The below branch instruction will
	 * be patched out when the idle states are discovered if platform
	 * does not require workaround.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/* Do timebase resync if we are waking up from sleep. Use cr3 value
	 * set in exceptions-64s.S */
	ble	cr3,clear_lock
	/* Time base re-sync */
	li	r0,OPAL_RESYNC_TIMEBASE
	bl	opal_call_realmode;
	/* TODO: Check r3 for failure */

	/*
	 * If waking up from sleep, per core state is not lost, skip to
	 * clear_lock.
	 */
	bne	cr4,clear_lock

	/* Restore per core state */
	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4
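
	/*
	 * Drop the core_idle_state lock: keep only the thread bits, and use
	 * lwsync so that the restores above are visible before other threads
	 * see the lock bit cleared.
	 */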
clear_lock:
	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	bne	cr4,hypervisor_state_restored

	/* Waking up from winkle */

	/* Restore per thread state */
	bl	__restore_cpu_power8

	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h	/* Only re-insert valid entries */
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

hypervisor_state_restored:

	li	r5,PNV_THREAD_RUNNING
	stb	r5,PACA_THREAD_IDLE_STATE(r13)

	mtspr	SPRN_SRR1,r16
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	6f
	b	kvm_start_guest
6:
#endif

	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r3,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r3
	mfspr	r3,SPRN_SRR1		/* Return SRR1 */
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid

fastsleep_workaround_at_exit:
	/* Undo the fast sleep workaround applied on entry */
	li	r3,1
	li	r4,0
	li	r0,OPAL_CONFIG_CPU_IDLE_STATE
	bl	opal_call_realmode
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(power7_wakeup_loss)
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(power7_wakeup_noloss)
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	power7_wakeup_loss
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid