entry.S

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/compiler.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
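
/*
 * Without kernel preemption there is nothing extra to do when returning to
 * kernel mode, so resume_kernel collapses to restore_all and
 * ret_from_exception is a small stub that disables interrupts first.  With
 * preemption enabled, __ret_from_irq doubles as ret_from_exception and the
 * real resume_kernel path below handles rescheduling the kernel.
 */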
#ifndef CONFIG_PREEMPT
#define resume_kernel	restore_all
#else
#define __ret_from_irq	ret_from_exception
#endif
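
/*
 * Throughout this file $28 (gp) holds the current thread_info pointer, so
 * the TI_* offsets index thread_info fields, while the PT_* offsets index
 * the saved pt_regs frame that sp points at.
 */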
	.text
	.align	5
#ifndef CONFIG_PREEMPT
FEXPORT(ret_from_exception)
	local_irq_disable			# preempt stop
	b	__ret_from_irq
#endif
FEXPORT(ret_from_irq)
	LONG_S	s0, TI_REGS($28)
FEXPORT(__ret_from_irq)
/*
 * We can be coming here from a syscall done in the kernel space,
 * e.g. a failed kernel_execve().
 */
resume_userspace_check:
	LONG_L	t0, PT_STATUS(sp)		# returning to kernel mode?
	andi	t0, t0, KU_USER
	beqz	t0, resume_kernel

resume_userspace:
	local_irq_disable		# make sure we dont miss an
					# interrupt setting need_resched
					# between sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	andi	t0, a2, _TIF_WORK_MASK	# (ignoring syscall_trace)
	bnez	t0, work_pending
	j	restore_all
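
/*
 * Kernel preemption: reschedule only if the preempt count is zero,
 * TIF_NEED_RESCHED is set and the interrupted context had interrupts
 * enabled (bit 0 of the saved Status register); loop until need_resched
 * is clear again.
 */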
#ifdef CONFIG_PREEMPT
resume_kernel:
	local_irq_disable
	lw	t0, TI_PRE_COUNT($28)
	bnez	t0, restore_all
need_resched:
	LONG_L	t0, TI_FLAGS($28)
	andi	t1, t0, _TIF_NEED_RESCHED
	beqz	t1, restore_all
	LONG_L	t0, PT_STATUS(sp)		# Interrupts off?
	andi	t0, 1
	beqz	t0, restore_all
	jal	preempt_schedule_irq
	b	need_resched
#endif
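
/*
 * Kernel threads: copy_thread() leaves the thread function in s0 and its
 * argument in s1.  schedule_tail() finishes the context switch before the
 * thread function is called; if it ever returns we leave via syscall_exit.
 */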
FEXPORT(ret_from_kernel_thread)
	jal	schedule_tail		# a0 = struct task_struct *prev
	move	a0, s1
	jal	s0
	j	syscall_exit

FEXPORT(ret_from_fork)
	jal	schedule_tail		# a0 = struct task_struct *prev

FEXPORT(syscall_exit)
	local_irq_disable		# make sure need_resched and
					# signals dont change between
					# sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	li	t0, _TIF_ALLWORK_MASK
	and	t0, a2, t0
	bnez	t0, syscall_exit_work

restore_all:				# restore full frame
	.set	noat
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
restore_partial:			# restore partial frame
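/*
 * With IRQ-flag tracing enabled, inform lockdep whether the context being
 * restored had interrupts enabled (ST0_IE, or ST0_IEP on R3000-style CPUs).
 * The register state is spilled and reloaded around the C call.
 */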
#ifdef CONFIG_TRACE_IRQFLAGS
	SAVE_STATIC
	SAVE_AT
	SAVE_TEMP
	LONG_L	v0, PT_STATUS(sp)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	v0, ST0_IEP
#else
	and	v0, ST0_IE
#endif
	beqz	v0, 1f
	jal	trace_hardirqs_on
	b	2f
1:	jal	trace_hardirqs_off
2:
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
#endif
	RESTORE_SOME
	RESTORE_SP_AND_RET
	.set	at
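
/*
 * Pending work on the way back to user space: reschedule and/or deliver
 * signals and notify-resume requests, re-reading TI_FLAGS each pass since
 * new work may have arrived in the meantime.
 */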
work_pending:
	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
	beqz	t0, work_notifysig
work_resched:
	jal	schedule

	local_irq_disable		# make sure need_resched and
					# signals dont change between
					# sampling and return
	LONG_L	a2, TI_FLAGS($28)
	andi	t0, a2, _TIF_WORK_MASK	# is there any work to be done
					# other than syscall tracing?
	beqz	t0, restore_all
	andi	t0, a2, _TIF_NEED_RESCHED
	bnez	t0, work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	move	a0, sp
	li	a1, 0
	jal	do_notify_resume	# a2 already loaded
	j	resume_userspace_check

FEXPORT(syscall_exit_partial)
	local_irq_disable		# make sure need_resched doesn't
					# change between and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	li	t0, _TIF_ALLWORK_MASK
	and	t0, a2
	beqz	t0, restore_partial
	SAVE_STATIC
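/*
 * syscall_exit_work is entered with a2 = TI_FLAGS.  A return to kernel mode
 * (an in-kernel syscall) goes straight to resume_kernel; tracing/audit work
 * (_TIF_WORK_SYSCALL_EXIT) goes through syscall_trace_leave(); everything
 * else is handled by the common work_pending path above.
 */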
syscall_exit_work:
	LONG_L	t0, PT_STATUS(sp)	# returning to kernel mode?
	andi	t0, t0, KU_USER
	beqz	t0, resume_kernel
	li	t0, _TIF_WORK_SYSCALL_EXIT
	and	t0, a2			# a2 is preloaded with TI_FLAGS
	beqz	t0, work_pending	# trace bit set?
	local_irq_enable		# could let syscall_trace_leave()
					# call schedule() instead
	move	a0, sp
	jal	syscall_trace_leave
	b	resume_userspace

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
    defined(CONFIG_MIPS_MT)

/*
 * MIPS32R2 Instruction Hazard Barrier - must be called
 *
 * For C code use the inline version named instruction_hazard().
 */
LEAF(mips_ihb)
	.set	MIPS_ISA_LEVEL_RAW
	jr.hb	ra
	nop
	END(mips_ihb)

#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */