genex.S

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>
	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX
#endif
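	/*
	 * Cause.ExcCode occupies bits 6..2, so masking CP0_CAUSE with
	 * 0x7c yields ExcCode * 4, a ready-made byte offset into a table
	 * of 32-bit handler pointers; 64-bit kernels shift once more for
	 * 8-byte pointers.
	 */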
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)
/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	/*
	 * Big trouble: we may now have two dirty primary cache lines for
	 * the same physical address.  We can safely invalidate the line
	 * pointed to by c0_badvaddr because after return from this
	 * exception handler the load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4			# Is this ...
	and	k0, k1			# ... really needed?
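	/*
	 * With TagLo zeroed, Index_Store_Tag_D invalidates the primary
	 * dcache line for the address; Hit_Writeback_Inv_SD then writes
	 * back and invalidates the matching secondary line.
	 */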
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	 nop
	.set	pop
	END(__r4k_wait)
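/*
 * If an interrupt is taken anywhere inside the 32-byte region above,
 * the prologue below rounds EPC down to the start of __r4k_wait so
 * that the TIF_NEED_RESCHED test is re-run before the wait executes.
 */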
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	 MTC0	k0, CP0_EPC
	.set	pop
	.endm
	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is
	 * taken after interrupts are disabled but before the state is
	 * updated, it will appear to restore_all that it is incorrectly
	 * returning with interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp	# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
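	/*
	 * The 64-bit case assembles the irq_stack address 16 bits at a
	 * time (%highest/%higher/%hi), leaving the %lo part to be folded
	 * into the load below.
	 */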
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)
	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be
 * replaced at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT
/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
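/*
 * The lui/ori immediates above are rewritten at runtime so that v0
 * carries the address of the per-interrupt handler into
 * except_vec_vi_handler, which invokes it via jalr.
 */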
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, which is passed
 * in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp	# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG
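	/*
	 * Debug.DBp (bit 1) is set when the exception was raised by an
	 * SDBBP instruction; shifting it into the sign bit lets the
	 * bgez below return immediately for anything else.
	 */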
	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

#ifdef CONFIG_SMP
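	/*
	 * Open-coded ll/sc spinlock: spin while the word is non-zero,
	 * then store the (non-zero) address of the lock itself to take
	 * it.  Only k0/k1 are usable this early, so no regular spinlock.
	 */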
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	ll	k0, 0(k0)
	bnez	k0, 1b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)
/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous
	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT
NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
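	/*
	 * Setting EXL keeps the CPU in kernel mode with interrupts
	 * masked while Status is rewritten; the _ehb below clears the
	 * execution hazard so the new value takes effect immediately.
	 */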
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)
	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/
	.set	mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
	.set	pop
	CLI
	TRACE_IRQS_OFF
	.endm
	.macro	__build_clear_msa_fpe
	_cfcmsa	a1, MSA_CSR
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__BUILD_silent exception
	.endm
	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it thinks it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm
	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm
	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm
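/* The numbers in the comments below are CP0 Cause.ExcCode values. */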
	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */
	.align	5
LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
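	/*
	 * Probing first avoids the lw of the instruction word in
	 * handle_ri_rdhwr taking a nested TLB refill while k0/k1 hold
	 * live state; without a mapping we go straight to the slow path.
	 */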
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)
LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
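	/*
	 * Bit 0 of EPC is the ISA mode bit: if set, the faulting code is
	 * microMIPS, so strip the bit and fetch the 32-bit encoding as
	 * two halfwords.
	 */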
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
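	/*
	 * rdhwr v1,$29 reads the UserLocal (TLS pointer) hardware
	 * register; emulate it by returning thread_info->tp_value in v1
	 * and advancing EPC past the trapping instruction.
	 */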
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	arch=r4000
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif