/*
 * swsusp_32.S — low-level suspend-to-disk (swsusp) support for
 * 32-bit PowerPC: CPU context save/restore around hibernation.
 */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #include <linux/threads.h>
  3. #include <asm/processor.h>
  4. #include <asm/page.h>
  5. #include <asm/cputable.h>
  6. #include <asm/thread_info.h>
  7. #include <asm/ppc_asm.h>
  8. #include <asm/asm-offsets.h>
  9. #include <asm/mmu.h>
  10. /*
  11. * Structure for storing CPU registers on the save area.
  12. */
  13. #define SL_SP 0
  14. #define SL_PC 4
  15. #define SL_MSR 8
  16. #define SL_SDR1 0xc
  17. #define SL_SPRG0 0x10 /* 4 sprg's */
  18. #define SL_DBAT0 0x20
  19. #define SL_IBAT0 0x28
  20. #define SL_DBAT1 0x30
  21. #define SL_IBAT1 0x38
  22. #define SL_DBAT2 0x40
  23. #define SL_IBAT2 0x48
  24. #define SL_DBAT3 0x50
  25. #define SL_IBAT3 0x58
  26. #define SL_TB 0x60
  27. #define SL_R2 0x68
  28. #define SL_CR 0x6c
  29. #define SL_LR 0x70
  30. #define SL_R12 0x74 /* r12 to r31 */
  31. #define SL_SIZE (SL_R12 + 80)
  32. .section .data
  33. .align 5
  34. _GLOBAL(swsusp_save_area)
  35. .space SL_SIZE
  36. .section .text
  37. .align 5
  38. _GLOBAL(swsusp_arch_suspend)
  39. lis r11,swsusp_save_area@h
  40. ori r11,r11,swsusp_save_area@l
  41. mflr r0
  42. stw r0,SL_LR(r11)
  43. mfcr r0
  44. stw r0,SL_CR(r11)
  45. stw r1,SL_SP(r11)
  46. stw r2,SL_R2(r11)
  47. stmw r12,SL_R12(r11)
  48. /* Save MSR & SDR1 */
  49. mfmsr r4
  50. stw r4,SL_MSR(r11)
  51. mfsdr1 r4
  52. stw r4,SL_SDR1(r11)
  53. /* Get a stable timebase and save it */
  54. 1: mftbu r4
  55. stw r4,SL_TB(r11)
  56. mftb r5
  57. stw r5,SL_TB+4(r11)
  58. mftbu r3
  59. cmpw r3,r4
  60. bne 1b
  61. /* Save SPRGs */
  62. mfsprg r4,0
  63. stw r4,SL_SPRG0(r11)
  64. mfsprg r4,1
  65. stw r4,SL_SPRG0+4(r11)
  66. mfsprg r4,2
  67. stw r4,SL_SPRG0+8(r11)
  68. mfsprg r4,3
  69. stw r4,SL_SPRG0+12(r11)
  70. /* Save BATs */
  71. mfdbatu r4,0
  72. stw r4,SL_DBAT0(r11)
  73. mfdbatl r4,0
  74. stw r4,SL_DBAT0+4(r11)
  75. mfdbatu r4,1
  76. stw r4,SL_DBAT1(r11)
  77. mfdbatl r4,1
  78. stw r4,SL_DBAT1+4(r11)
  79. mfdbatu r4,2
  80. stw r4,SL_DBAT2(r11)
  81. mfdbatl r4,2
  82. stw r4,SL_DBAT2+4(r11)
  83. mfdbatu r4,3
  84. stw r4,SL_DBAT3(r11)
  85. mfdbatl r4,3
  86. stw r4,SL_DBAT3+4(r11)
  87. mfibatu r4,0
  88. stw r4,SL_IBAT0(r11)
  89. mfibatl r4,0
  90. stw r4,SL_IBAT0+4(r11)
  91. mfibatu r4,1
  92. stw r4,SL_IBAT1(r11)
  93. mfibatl r4,1
  94. stw r4,SL_IBAT1+4(r11)
  95. mfibatu r4,2
  96. stw r4,SL_IBAT2(r11)
  97. mfibatl r4,2
  98. stw r4,SL_IBAT2+4(r11)
  99. mfibatu r4,3
  100. stw r4,SL_IBAT3(r11)
  101. mfibatl r4,3
  102. stw r4,SL_IBAT3+4(r11)
  103. #if 0
  104. /* Backup various CPU config stuffs */
  105. bl __save_cpu_setup
  106. #endif
  107. /* Call the low level suspend stuff (we should probably have made
  108. * a stackframe...
  109. */
  110. bl swsusp_save
  111. /* Restore LR from the save area */
  112. lis r11,swsusp_save_area@h
  113. ori r11,r11,swsusp_save_area@l
  114. lwz r0,SL_LR(r11)
  115. mtlr r0
  116. blr
  117. /* Resume code */
  118. _GLOBAL(swsusp_arch_resume)
  119. #ifdef CONFIG_ALTIVEC
  120. /* Stop pending alitvec streams and memory accesses */
  121. BEGIN_FTR_SECTION
  122. DSSALL
  123. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  124. #endif
  125. sync
  126. /* Disable MSR:DR to make sure we don't take a TLB or
  127. * hash miss during the copy, as our hash table will
  128. * for a while be unusable. For .text, we assume we are
  129. * covered by a BAT. This works only for non-G5 at this
  130. * point. G5 will need a better approach, possibly using
  131. * a small temporary hash table filled with large mappings,
  132. * disabling the MMU completely isn't a good option for
  133. * performance reasons.
  134. * (Note that 750's may have the same performance issue as
  135. * the G5 in this case, we should investigate using moving
  136. * BATs for these CPUs)
  137. */
  138. mfmsr r0
  139. sync
  140. rlwinm r0,r0,0,28,26 /* clear MSR_DR */
  141. mtmsr r0
  142. sync
  143. isync
  144. /* Load ptr the list of pages to copy in r3 */
  145. lis r11,(restore_pblist - KERNELBASE)@h
  146. ori r11,r11,restore_pblist@l
  147. lwz r10,0(r11)
  148. /* Copy the pages. This is a very basic implementation, to
  149. * be replaced by something more cache efficient */
  150. 1:
  151. tophys(r3,r10)
  152. li r0,256
  153. mtctr r0
  154. lwz r11,pbe_address(r3) /* source */
  155. tophys(r5,r11)
  156. lwz r10,pbe_orig_address(r3) /* destination */
  157. tophys(r6,r10)
  158. 2:
  159. lwz r8,0(r5)
  160. lwz r9,4(r5)
  161. lwz r10,8(r5)
  162. lwz r11,12(r5)
  163. addi r5,r5,16
  164. stw r8,0(r6)
  165. stw r9,4(r6)
  166. stw r10,8(r6)
  167. stw r11,12(r6)
  168. addi r6,r6,16
  169. bdnz 2b
  170. lwz r10,pbe_next(r3)
  171. cmpwi 0,r10,0
  172. bne 1b
  173. /* Do a very simple cache flush/inval of the L1 to ensure
  174. * coherency of the icache
  175. */
  176. lis r3,0x0002
  177. mtctr r3
  178. li r3, 0
  179. 1:
  180. lwz r0,0(r3)
  181. addi r3,r3,0x0020
  182. bdnz 1b
  183. isync
  184. sync
  185. /* Now flush those cache lines */
  186. lis r3,0x0002
  187. mtctr r3
  188. li r3, 0
  189. 1:
  190. dcbf 0,r3
  191. addi r3,r3,0x0020
  192. bdnz 1b
  193. sync
  194. /* Ok, we are now running with the kernel data of the old
  195. * kernel fully restored. We can get to the save area
  196. * easily now. As for the rest of the code, it assumes the
  197. * loader kernel and the booted one are exactly identical
  198. */
  199. lis r11,swsusp_save_area@h
  200. ori r11,r11,swsusp_save_area@l
  201. tophys(r11,r11)
  202. #if 0
  203. /* Restore various CPU config stuffs */
  204. bl __restore_cpu_setup
  205. #endif
  206. /* Restore the BATs, and SDR1. Then we can turn on the MMU.
  207. * This is a bit hairy as we are running out of those BATs,
  208. * but first, our code is probably in the icache, and we are
  209. * writing the same value to the BAT, so that should be fine,
  210. * though a better solution will have to be found long-term
  211. */
  212. lwz r4,SL_SDR1(r11)
  213. mtsdr1 r4
  214. lwz r4,SL_SPRG0(r11)
  215. mtsprg 0,r4
  216. lwz r4,SL_SPRG0+4(r11)
  217. mtsprg 1,r4
  218. lwz r4,SL_SPRG0+8(r11)
  219. mtsprg 2,r4
  220. lwz r4,SL_SPRG0+12(r11)
  221. mtsprg 3,r4
  222. #if 0
  223. lwz r4,SL_DBAT0(r11)
  224. mtdbatu 0,r4
  225. lwz r4,SL_DBAT0+4(r11)
  226. mtdbatl 0,r4
  227. lwz r4,SL_DBAT1(r11)
  228. mtdbatu 1,r4
  229. lwz r4,SL_DBAT1+4(r11)
  230. mtdbatl 1,r4
  231. lwz r4,SL_DBAT2(r11)
  232. mtdbatu 2,r4
  233. lwz r4,SL_DBAT2+4(r11)
  234. mtdbatl 2,r4
  235. lwz r4,SL_DBAT3(r11)
  236. mtdbatu 3,r4
  237. lwz r4,SL_DBAT3+4(r11)
  238. mtdbatl 3,r4
  239. lwz r4,SL_IBAT0(r11)
  240. mtibatu 0,r4
  241. lwz r4,SL_IBAT0+4(r11)
  242. mtibatl 0,r4
  243. lwz r4,SL_IBAT1(r11)
  244. mtibatu 1,r4
  245. lwz r4,SL_IBAT1+4(r11)
  246. mtibatl 1,r4
  247. lwz r4,SL_IBAT2(r11)
  248. mtibatu 2,r4
  249. lwz r4,SL_IBAT2+4(r11)
  250. mtibatl 2,r4
  251. lwz r4,SL_IBAT3(r11)
  252. mtibatu 3,r4
  253. lwz r4,SL_IBAT3+4(r11)
  254. mtibatl 3,r4
  255. #endif
  256. BEGIN_MMU_FTR_SECTION
  257. li r4,0
  258. mtspr SPRN_DBAT4U,r4
  259. mtspr SPRN_DBAT4L,r4
  260. mtspr SPRN_DBAT5U,r4
  261. mtspr SPRN_DBAT5L,r4
  262. mtspr SPRN_DBAT6U,r4
  263. mtspr SPRN_DBAT6L,r4
  264. mtspr SPRN_DBAT7U,r4
  265. mtspr SPRN_DBAT7L,r4
  266. mtspr SPRN_IBAT4U,r4
  267. mtspr SPRN_IBAT4L,r4
  268. mtspr SPRN_IBAT5U,r4
  269. mtspr SPRN_IBAT5L,r4
  270. mtspr SPRN_IBAT6U,r4
  271. mtspr SPRN_IBAT6L,r4
  272. mtspr SPRN_IBAT7U,r4
  273. mtspr SPRN_IBAT7L,r4
  274. END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
  275. /* Flush all TLBs */
  276. lis r4,0x1000
  277. 1: addic. r4,r4,-0x1000
  278. tlbie r4
  279. bgt 1b
  280. sync
  281. /* restore the MSR and turn on the MMU */
  282. lwz r3,SL_MSR(r11)
  283. bl turn_on_mmu
  284. tovirt(r11,r11)
  285. /* Restore TB */
  286. li r3,0
  287. mttbl r3
  288. lwz r3,SL_TB(r11)
  289. lwz r4,SL_TB+4(r11)
  290. mttbu r3
  291. mttbl r4
  292. /* Kick decrementer */
  293. li r0,1
  294. mtdec r0
  295. /* Restore the callee-saved registers and return */
  296. lwz r0,SL_CR(r11)
  297. mtcr r0
  298. lwz r2,SL_R2(r11)
  299. lmw r12,SL_R12(r11)
  300. lwz r1,SL_SP(r11)
  301. lwz r0,SL_LR(r11)
  302. mtlr r0
  303. // XXX Note: we don't really need to call swsusp_resume
  304. li r3,0
  305. blr
  306. /* FIXME:This construct is actually not useful since we don't shut
  307. * down the instruction MMU, we could just flip back MSR-DR on.
  308. */
  309. turn_on_mmu:
  310. mflr r4
  311. mtsrr0 r4
  312. mtsrr1 r3
  313. sync
  314. isync
  315. rfi