stackframe.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
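
/*
 * STATMASK covers the mode/interrupt bits in the low part of CP0
 * Status that must be saved and restored around exceptions: the
 * three-deep KUo/IEo, KUp/IEp, KUc/IEc stack on R3000-class cores
 * (bits 5..0, hence 0x3f), or KSU/ERL/EXL/IE on R4000-class and
 * later cores (bits 4..0, hence 0x1f).
 */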

        .macro  SAVE_AT
        .set    push
        .set    noat
        LONG_S  $1, PT_R1(sp)
        .set    pop
        .endm
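
/*
 * $1 is the assembler temporary ($at); the ".set noat" wrapper above
 * keeps the assembler from silently expanding macro instructions
 * through $at while we save it.
 */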

        .macro  SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        mflhxu  v1
        LONG_S  v1, PT_LO(sp)
        mflhxu  v1
        LONG_S  v1, PT_HI(sp)
        mflhxu  v1
        LONG_S  v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
        mfhi    v1
#endif
#ifdef CONFIG_32BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  $10, PT_R10(sp)
        LONG_S  $11, PT_R11(sp)
        LONG_S  $12, PT_R12(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
        LONG_S  v1, PT_HI(sp)
        mflo    v1
#endif
        LONG_S  $13, PT_R13(sp)
        LONG_S  $14, PT_R14(sp)
        LONG_S  $15, PT_R15(sp)
        LONG_S  $24, PT_R24(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
        LONG_S  v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        /*
         * The Octeon multiplier state is affected by general
         * multiply instructions. It must be saved before kernel
         * code might corrupt it.
         */
        jal     octeon_mult_save
#endif
        .endm
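
/*
 * Note the scheduling above: mfhi is issued early and mflo between
 * unrelated stores, with the PT_HI/PT_LO stores deferred, so the
 * HI/LO access latency can be hidden on cores where mfhi/mflo
 * interlock with the multiplier.
 */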

        .macro  SAVE_STATIC
        LONG_S  $16, PT_R16(sp)
        LONG_S  $17, PT_R17(sp)
        LONG_S  $18, PT_R18(sp)
        LONG_S  $19, PT_R19(sp)
        LONG_S  $20, PT_R20(sp)
        LONG_S  $21, PT_R21(sp)
        LONG_S  $22, PT_R22(sp)
        LONG_S  $23, PT_R23(sp)
        LONG_S  $30, PT_R30(sp)
        .endm
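
/*
 * $16..$23 and $30 are s0..s7 and s8/fp, the callee-saved set, so
 * SAVE_STATIC is only needed on paths where the saved context may be
 * resumed without returning through the normal call chain (e.g. a
 * context switch).
 */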

#ifdef CONFIG_SMP
        .macro  get_saved_sp    /* SMP variation */
        ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, 16
#endif
        LONG_SRL        k0, SMP_CPUID_PTRSHIFT
        LONG_ADDU       k1, k0
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm

        .macro  set_saved_sp stackp temp temp2
        ASM_CPUID_MFC0  \temp, ASM_SMP_CPUID_REG
        LONG_SRL        \temp, SMP_CPUID_PTRSHIFT
        LONG_S  \stackp, kernelsp(\temp)
        .endm
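
/*
 * In C terms the pair above behaves roughly like the sketch below:
 * kernelsp is a per-CPU array of saved kernel stack pointers, indexed
 * by the CPU id read from ASM_SMP_CPUID_REG and scaled to a pointer
 * offset by SMP_CPUID_PTRSHIFT (an illustrative sketch only, not part
 * of this file):
 *
 *	unsigned long kernelsp[NR_CPUS];
 *
 *	#define get_saved_sp()       (kernelsp[raw_smp_processor_id()])
 *	#define set_saved_sp(stackp) (kernelsp[raw_smp_processor_id()] = (stackp))
 */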
#else /* !CONFIG_SMP */
        .macro  get_saved_sp    /* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
        /*
         * Clear the BTB (branch target buffer) and forbid the RAS
         * (return address stack) to work around the out-of-order
         * issue in Loongson2F via its diagnostic register.
         */
        move    k0, ra
        jal     1f
         nop
1:      jal     1f
         nop
1:      jal     1f
         nop
1:      jal     1f
         nop
1:      move    ra, k0
        li      k0, 3
        mtc0    k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
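
/*
 * The chain of "jal 1f" dummy calls above overwrites any stale user
 * addresses held in the return address stack, and the write of 3 to
 * the Loongson diagnostic register (CP0 $22) then clears the BTB and
 * forbids the RAS, as described in the comment above.
 */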
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, k1, 16
#endif
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm

        .macro  set_saved_sp stackp temp temp2
        LONG_S  \stackp, kernelsp
        .endm
#endif
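
/*
 * On uniprocessor kernels kernelsp is a single variable, so
 * set_saved_sp ignores its \temp arguments; they exist only so that
 * callers can use the same signature as the SMP variant.
 */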

        .macro  SAVE_SOME
        .set    push
        .set    noat
        .set    reorder
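/*
 * The kernel runs with Status.CU0 set (see CLI/STI/KMODE below), so
 * CU0 doubles as an "already on the kernel stack" flag: shifting
 * Status left by 3 moves CU0 (bit 28) into the sign bit, and bltz
 * then skips the kernel stack load when the exception came from
 * kernel mode.
 */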
        mfc0    k0, CP0_STATUS
        sll     k0, 3           /* extract cu0 bit */
        .set    noreorder
        bltz    k0, 8f
         move   k1, sp
#ifdef CONFIG_EVA
        /*
         * Flush interAptiv's Return Prediction Stack (RPS) by writing
         * EntryHi. Toggling Config7.RPS is slower and less portable.
         *
         * The RPS isn't automatically flushed when exceptions are
         * taken, which can result in kernel mode speculative accesses
         * to user addresses if the RPS mispredicts. That's harmless
         * when user and kernel share the same address space, but with
         * EVA the same user segments may be unmapped to kernel mode,
         * even containing sensitive MMIO regions or invalid memory.
         *
         * This can happen when the kernel sets the return address to
         * ret_from_* and jr's to the exception handler, which looks
         * more like a tail call than a function call. If nested calls
         * don't evict the last user address in the RPS, it will
         * mispredict the return and fetch from a user controlled
         * address into the icache.
         *
         * More recent EVA-capable cores with MAAR to restrict
         * speculative accesses aren't affected.
         */
        MFC0    k0, CP0_ENTRYHI
        MTC0    k0, CP0_ENTRYHI
#endif
        .set    reorder
        /* Called from user mode, new stack. */
        get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:      move    k0, sp
        PTR_SUBU sp, k1, PT_SIZE
#else
        .set    at=k0
8:      PTR_SUBU k1, PT_SIZE
        .set    noat
        move    k0, sp
        move    sp, k1
#endif
        LONG_S  k0, PT_R29(sp)
        LONG_S  $3, PT_R3(sp)
        /*
         * You might think that you don't need to save $0,
         * but the FPU emulator and gdb remote debug stub
         * need it to operate correctly
         */
        LONG_S  $0, PT_R0(sp)
        mfc0    v1, CP0_STATUS
        LONG_S  $2, PT_R2(sp)
        LONG_S  v1, PT_STATUS(sp)
        LONG_S  $4, PT_R4(sp)
        mfc0    v1, CP0_CAUSE
        LONG_S  $5, PT_R5(sp)
        LONG_S  v1, PT_CAUSE(sp)
        LONG_S  $6, PT_R6(sp)
        MFC0    v1, CP0_EPC
        LONG_S  $7, PT_R7(sp)
#ifdef CONFIG_64BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  v1, PT_EPC(sp)
        LONG_S  $25, PT_R25(sp)
        LONG_S  $28, PT_R28(sp)
        LONG_S  $31, PT_R31(sp)
        ori     $28, sp, _THREAD_MASK
        xori    $28, _THREAD_MASK
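/*
 * The ori/xori pair above rounds sp down to the base of the thread
 * union, leaving $28 (gp) pointing at the current thread_info: ori
 * sets every bit in _THREAD_MASK, then xori clears them again.
 */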
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        .set    mips64
        pref    0, 0($28)       /* Prefetch the current pointer */
#endif
        .set    pop
        .endm

        .macro  SAVE_ALL
        SAVE_SOME
        SAVE_AT
        SAVE_TEMP
        SAVE_STATIC
        .endm
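
/*
 * Typical use in a low-level exception handler (an illustrative
 * sketch only; handle_foo/do_foo are made-up names, NESTED/END come
 * from <asm/asm.h>):
 *
 *	NESTED(handle_foo, PT_SIZE, sp)
 *		SAVE_ALL
 *		CLI
 *		move	a0, sp			# struct pt_regs *
 *		jal	do_foo			# C-level handler
 *		RESTORE_ALL_AND_RET
 *	END(handle_foo)
 */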

        .macro  RESTORE_AT
        .set    push
        .set    noat
        LONG_L  $1, PT_R1(sp)
        .set    pop
        .endm

        .macro  RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        /* Restore the Octeon multiplier state */
        jal     octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        LONG_L  $24, PT_ACX(sp)
        mtlhx   $24
        LONG_L  $24, PT_HI(sp)
        mtlhx   $24
        LONG_L  $24, PT_LO(sp)
        mtlhx   $24
#elif !defined(CONFIG_CPU_MIPSR6)
        LONG_L  $24, PT_LO(sp)
        mtlo    $24
        LONG_L  $24, PT_HI(sp)
        mthi    $24
#endif
#ifdef CONFIG_32BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $10, PT_R10(sp)
        LONG_L  $11, PT_R11(sp)
        LONG_L  $12, PT_R12(sp)
        LONG_L  $13, PT_R13(sp)
        LONG_L  $14, PT_R14(sp)
        LONG_L  $15, PT_R15(sp)
        LONG_L  $24, PT_R24(sp)
        .endm
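
/*
 * The SmartMIPS mflhxu/mtlhx instructions treat ACX/HI/LO as a small
 * stack: SAVE_TEMP pops LO, HI, ACX in that order, and the restore
 * above pushes them back in the reverse order, ACX first.
 */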

        .macro  RESTORE_STATIC
        LONG_L  $16, PT_R16(sp)
        LONG_L  $17, PT_R17(sp)
        LONG_L  $18, PT_R18(sp)
        LONG_L  $19, PT_R19(sp)
        LONG_L  $20, PT_R20(sp)
        LONG_L  $21, PT_R21(sp)
        LONG_L  $22, PT_R22(sp)
        LONG_L  $23, PT_R23(sp)
        LONG_L  $30, PT_R30(sp)
        .endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
        mfc0    a0, CP0_STATUS
        li      v1, ST0_CU1 | ST0_IM
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
        LONG_L  $7, PT_R7(sp)
        LONG_L  $6, PT_R6(sp)
        LONG_L  $5, PT_R5(sp)
        LONG_L  $4, PT_R4(sp)
        LONG_L  $3, PT_R3(sp)
        LONG_L  $2, PT_R2(sp)
        .set    pop
        .endm

        .macro  RESTORE_SP_AND_RET
        .set    push
        .set    noreorder
        LONG_L  k0, PT_EPC(sp)
        LONG_L  sp, PT_R29(sp)
        jr      k0
         rfe
        .set    pop
        .endm
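
/*
 * On the R3000 the return is "jr k0" with rfe in the delay slot: rfe
 * pops the KUp/IEp pair back into KUc/IEc while the jump to the saved
 * EPC is already in flight, so the mode switch and the control
 * transfer happen together.
 */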

#else
        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
        mfc0    a0, CP0_STATUS
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        li      v1, ST0_CU1 | ST0_FR | ST0_IM
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
        LONG_L  v1, PT_EPC(sp)
        MTC0    v1, CP0_EPC
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
#ifdef CONFIG_64BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $7, PT_R7(sp)
        LONG_L  $6, PT_R6(sp)
        LONG_L  $5, PT_R5(sp)
        LONG_L  $4, PT_R4(sp)
        LONG_L  $3, PT_R3(sp)
        LONG_L  $2, PT_R2(sp)
        .set    pop
        .endm

        .macro  RESTORE_SP_AND_RET
        LONG_L  sp, PT_R29(sp)
        .set    arch=r4000
        eret
        .set    mips0
        .endm
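
/*
 * Unlike the R3000 path, eret has no delay slot: it clears Status.EXL
 * (or ERL) and jumps to the restored EPC in a single architecturally
 * atomic step.
 */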
#endif

        .macro  RESTORE_SP
        LONG_L  sp, PT_R29(sp)
        .endm

        .macro  RESTORE_ALL
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP
        .endm

        .macro  RESTORE_ALL_AND_RET
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP_AND_RET
        .endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
        .macro  CLI
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK
        mtc0    t0, CP0_STATUS
        irq_disable_hazard
        .endm
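
/*
 * The or/xori pair is the usual trick for forcing known values into a
 * bit field: "or" sets CU0 and every STATMASK bit to 1, then "xori"
 * flips the STATMASK bits back to 0, leaving CU0 set and the
 * mode/interrupt bits cleared: kernel mode, interrupts off.
 */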

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
        .macro  STI
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
        irq_enable_hazard
        .endm
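
/*
 * Same trick as CLI, except that the xori mask excludes bit 0 (IE, or
 * IEc on R3000-class cores), so that bit stays set from the "or" and
 * interrupts end up enabled.
 */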

/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * that for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
        .macro  KMODE
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        andi    t2, t0, ST0_IEP
        srl     t2, 2
        or      t0, t2
#endif
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
        irq_disable_hazard
        .endm
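
/*
 * On R3000-class cores the previous interrupt enable lives in IEp
 * (Status bit 2); the andi/srl/or sequence copies it down to the IEc
 * position (bit 0) so it survives the or/xori masking that clears the
 * rest of the mode bits.
 */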

#endif /* _ASM_STACKFRAME_H */