/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define PTE_INDX_MSK	0xffc
#define PTE_INDX_SHIFT	10
#define _PGDIR_SHIFT	22
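
/*
 * tlbop_begin: common fast path of the TLB exception handlers
 * instantiated below.  It saves the scratch registers a2/a3/r6 to
 * ss2-ss4, walks the two-level page table by hand (pgd base from
 * PGDR, fault address from MEH) and tests the PTE against
 * (_PAGE_PRESENT | \val0).  On a usable PTE it sets the
 * valid/accessed (and \val1/\val2) software bits and returns with
 * rte; otherwise it falls through to the \name slow-path label,
 * which restores the scratch registers and enters the generic
 * fault handling via SAVE_ALL.
 */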
.macro	tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr	a3, ss2
	mtcr	r6, ss3
	mtcr	a2, ss4

	RD_PGDR	r6
	RD_MEH	a3
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3
	sync.is

	btsti	a3, 31
	bf	1f
	RD_PGDR_K r6
1:
#else
	bgeni	a2, 31
	WR_MCIR	a2
	bgeni	a2, 25
	WR_MCIR	a2
#endif
	bclri	r6, 0
	lrw	a2, PHYS_OFFSET
	subu	r6, a2
	bseti	r6, 31

	mov	a2, a3
	lsri	a2, _PGDIR_SHIFT
	lsli	a2, 2
	addu	r6, a2
	ldw	r6, (r6)

	lrw	a2, PHYS_OFFSET
	subu	r6, a2
	bseti	r6, 31

	lsri	a3, PTE_INDX_SHIFT
	lrw	a2, PTE_INDX_MSK
	and	a3, a2
	addu	r6, a3
	ldw	a3, (r6)

	movi	a2, (_PAGE_PRESENT | \val0)
	and	a3, a2
	cmpne	a3, a2
	bt	\name

	/* First access to the page: just update the PTE flags */
	ldw	a3, (r6)
	bgeni	a2, PAGE_VALID_BIT
	bseti	a2, PAGE_ACCESSED_BIT
	bseti	a2, \val1
	bseti	a2, \val2
	or	a3, a2
	stw	a3, (r6)

	/*
	 * Some CPUs' TLB hard refill bypasses the cache: clean the
	 * D-cache line holding the PTE so the refill sees the update.
	 */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi	a2, 0x22
	bseti	a2, 6
	mtcr	r6, cr22
	mtcr	a2, cr17
	sync
#endif

	mfcr	a3, ss2
	mfcr	r6, ss3
	mfcr	a2, ss4
	rte
\name:
	mfcr	a3, ss2
	mfcr	r6, ss3
	mfcr	a2, ss4
	SAVE_ALL EPC_KEEP
.endm
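
/*
 * tlbop_end: shared slow-path tail.  Re-reads the fault address from
 * MEH into a2, re-enables exceptions/interrupts and calls the C-level
 * do_page_fault (regs in a0, write flag in a1) before leaving through
 * ret_from_exception.
 */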
.macro tlbop_end is_write
	RD_MEH	a2
	psrset	ee, ie
	mov	a0, sp
	movi	a1, \is_write
	jbsr	do_page_fault
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */
	jmpi	ret_from_exception
.endm

.text
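
/*
 * Instantiate the three TLB exception handlers: csky_tlbinvalidl
 * (load miss), csky_tlbinvalids (store miss) and csky_tlbmodified
 * (write to a present but clean page).  The store variants also set
 * the dirty/modified bits in the fast path.
 */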
tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
	jbsr	csky_cmpxchg_fixup
#endif
tlbop_end 1
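
/*
 * System-call entry: SAVE_ALL EPC_INCREASE saves the user context and
 * advances EPC past the trap instruction.  The syscall number arrives
 * in the syscallid register alias, arguments in a0-a3 with the fifth
 * and sixth in r4/r5 (ABIv2, spilled to the stack around the call) or
 * r6/r7 (ABIv1).
 */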
ENTRY(csky_systemcall)
	SAVE_ALL EPC_INCREASE

	psrset	ee, ie

	/* Stack frame for the syscall; originally set up via set_esp0 */
	mov	r12, sp
	bmaski	r11, 13			/* r11 = 0x1fff */
	andn	r12, r11		/* r12 = 8KiB-aligned stack base */
	bgeni	r11, 9			/* r11 = 0x200 */
	addi	r11, 32			/* r11 = 0x220 */
	addu	r12, r11
	st	sp, (r12, 0)

	lrw	r11, __NR_syscalls
	cmphs	syscallid, r11		/* Check syscall number is in range */
	bt	ret_from_exception

	lrw	r13, sys_call_table
	ixw	r13, syscallid
	ldw	r11, (r13)
	cmpnei	r11, 0
	bf	ret_from_exception

	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r8, (r9, TINFO_FLAGS)
	btsti	r8, TIF_SYSCALL_TRACE
	bt	1f
#if defined(__CSKYABIV2__)
	subi	sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
	jsr	r11			/* Do system call */
	addi	sp, 8
#else
	jsr	r11
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */
	jmpi	ret_from_exception

1:
	movi	a0, 0			/* enter system call */
	mov	a1, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace
	/* Reload the args before the system call: the tracer may have
	 * changed them */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	subi	sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
#else
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
#endif
	jsr	r11			/* Do system call */
#if defined(__CSKYABIV2__)
	addi	sp, 8
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */

	movi	a0, 1			/* leave system call */
	mov	a1, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace

syscall_exit_work:
	ld	syscallid, (sp, LSAVE_PSR)	/* Load saved PSR */
	btsti	syscallid, 31			/* Returning to kernel mode? */
	bt	2f
	jmpi	resume_userspace
2:
	RESTORE_ALL
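
/*
 * Return paths for new threads.  For a kernel thread, copy_thread is
 * assumed to have placed the thread function in r9 and its argument
 * in r8; both paths call schedule_tail first, as required after a
 * context switch.
 */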
ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r8
	jsr	r9
	jbsr	ret_from_exception

ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r8, (r9, TINFO_FLAGS)
	movi	r11_sig, 1
	btsti	r8, TIF_SYSCALL_TRACE
	bf	3f
	movi	a0, 1
	mov	a1, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace
3:
	jbsr	ret_from_exception
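
/*
 * Common exception return.  PSR bit 31 (the S bit) distinguishes a
 * return to kernel mode, which restores immediately, from a return to
 * user space, which first checks thread_info->flags for pending
 * signals, notify-resume work or a reschedule.
 */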
ret_from_exception:
	ld	syscallid, (sp, LSAVE_PSR)
	btsti	syscallid, 31
	bt	1f

	/*
	 * Load the address of current's thread_info, then fetch
	 * thread_info->flags to see whether any work is pending.
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

resume_userspace:
	ldw	r8, (r9, TINFO_FLAGS)
	andi	r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
	cmpnei	r8, 0
	bt	exit_work
1:
	RESTORE_ALL

exit_work:
	mov	a0, sp			/* Stack address is arg[0] */
	jbsr	set_esp0		/* Call C level */
	btsti	r8, TIF_NEED_RESCHED
	bt	work_resched
	/* If thread_info->flags is empty, RESTORE_ALL */
	cmpnei	r8, 0
	bf	1b
	mov	a1, sp
	mov	a0, r8
	mov	a2, r11_sig		/* syscall? */
	btsti	r8, TIF_SIGPENDING	/* delivering a signal? */
	/* prevent further restarts (set r11 = 0) */
	clrt	r11_sig
	jbsr	do_notify_resume	/* do signals */
	br	resume_userspace

work_resched:
	lrw	syscallid, ret_from_exception
	mov	r15, syscallid		/* Return address in link register */
	jmpi	schedule

ENTRY(sys_rt_sigreturn)
	movi	r11_sig, 0
	jmpi	do_rt_sigreturn
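
/*
 * Generic trap entry: save the full context and dispatch to the
 * C-level trap_c() handler.
 */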
ENTRY(csky_trap)
	SAVE_ALL EPC_KEEP
	psrset	ee
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */
	mov	a0, sp			/* Push Stack pointer arg */
	jbsr	trap_c			/* Call C-level trap handler */
	jmpi	ret_from_exception

/*
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
	USPTOKSP

	/* Advance epc so the trap instruction is not re-executed */
	mfcr	a0, epc
	INCTRAP	a0
	mtcr	a0, epc

	/* Get current task's thread_info from the 8KiB kernel stack */
	bmaski	a0, THREAD_SHIFT
	not	a0
	subi	sp, 1		/* sp - 1, so a boundary sp masks into its own stack */
	and	a0, sp
	addi	sp, 1

	/* Fetch the saved TLS pointer */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP
	rte
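
/*
 * IRQ entry.  Under CONFIG_PREEMPT the preempt count in thread_info
 * is incremented around csky_do_IRQ; if it drops back to zero with
 * TIF_NEED_RESCHED set, preempt_schedule_irq is called in a loop
 * until no reschedule is pending.
 */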
ENTRY(csky_irq)
	SAVE_ALL EPC_KEEP
	psrset	ee
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */

#ifdef CONFIG_PREEMPT
	mov	r9, sp			/* Get current stack pointer */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10			/* Get thread_info */

	/*
	 * Get task_struct->stack.preempt_count for current,
	 * and increase it by 1.
	 */
	ldw	r8, (r9, TINFO_PREEMPT)
	addi	r8, 1
	stw	r8, (r9, TINFO_PREEMPT)
#endif

	mov	a0, sp
	jbsr	csky_do_IRQ

#ifdef CONFIG_PREEMPT
	subi	r8, 1
	stw	r8, (r9, TINFO_PREEMPT)
	cmpnei	r8, 0
	bt	2f
	ldw	r8, (r9, TINFO_FLAGS)
	btsti	r8, TIF_NEED_RESCHED
	bf	2f
1:
	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
	ldw	r7, (r9, TINFO_FLAGS)	/* get new tasks TI_FLAGS */
	btsti	r7, TIF_NEED_RESCHED
	bt	1b			/* go again */
#endif
2:
	jmpi	ret_from_exception

/*
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * returns next in a0
 */
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0

	mfcr	a2, psr			/* Save PSR value */
	stw	a2, (a3, THREAD_SR)	/* Save PSR in task struct */
	bclri	a2, 6			/* Disable interrupts */
	mtcr	a2, psr

	SAVE_SWITCH_STACK

	stw	sp, (a3, THREAD_KSP)

#ifdef CONFIG_CPU_HAS_HILO
	lrw	r10, THREAD_DSPHI
	add	r10, a3
	mfhi	r6
	mflo	r7
	stw	r6, (r10, 0)		/* THREAD_DSPHI */
	stw	r7, (r10, 4)		/* THREAD_DSPLO */
	mfcr	r6, cr14
	stw	r6, (r10, 8)		/* THREAD_DSPCSR */
#endif

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1

	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */

#ifdef CONFIG_CPU_HAS_HILO
	lrw	r10, THREAD_DSPHI
	add	r10, a3
	ldw	r6, (r10, 8)		/* THREAD_DSPCSR */
	mtcr	r6, cr14
	ldw	r6, (r10, 0)		/* THREAD_DSPHI */
	ldw	r7, (r10, 4)		/* THREAD_DSPLO */
	mthi	r6
	mtlo	r7
#endif

	ldw	a2, (a3, THREAD_SR)	/* Set next PSR */
	mtcr	a2, psr

#if defined(__CSKYABIV2__)
	addi	r7, a1, TASK_THREAD_INFO
	ldw	tls, (r7, TINFO_TP_VALUE)
#endif

	RESTORE_SWITCH_STACK

	rts
ENDPROC(__switch_to)
  337. ENDPROC(__switch_to)