/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */

#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
#include <asm/msa.h>

struct task_struct;

enum {
	FP_SAVE_NONE	= 0,
	FP_SAVE_VECTOR	= -1,
	FP_SAVE_SCALAR	= 1,
};
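
/*
 * These values are passed to resume() below as its fp_save argument:
 * FP_SAVE_NONE requests no floating point save for the outgoing task,
 * FP_SAVE_SCALAR a save of the scalar FP register state only, and
 * FP_SAVE_VECTOR a save of the full MSA vector register state.
 */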

/**
 * resume - resume execution of a task
 * @prev:	The task previously executed.
 * @next:	The task to begin executing.
 * @next_ti:	task_thread_info(next).
 * @fp_save:	Which, if any, FP context to save for prev.
 *
 * This function is used whilst scheduling to save the context of prev & load
 * the context of next. Returns prev.
 */
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
		struct task_struct *next, struct thread_info *next_ti,
		s32 fp_save);

extern unsigned int ll_bit;
extern struct task_struct *ll_task;
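
/*
 * ll_bit and ll_task back the kernel's software emulation of the ll/sc
 * instruction pair on CPUs without usable hardware ll/sc: the emulated ll
 * sets ll_bit and records the owning task in ll_task, and a later emulated
 * sc succeeds only if ll_bit is still set for the same task (see
 * simulate_ll() and simulate_sc() in arch/mips/kernel/traps.c).
 */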

#ifdef CONFIG_MIPS_MT_FPAFF
/*
 * Handle the scheduler resume end of FPU affinity management. We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */
#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	/* Relies on expansion inside switch_to(), where next is in scope. */ \
	next->thread.emulated_fp = 0;					\
} while (0)
#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

#define __clear_software_ll_bit()					\
do {									\
	if (cpu_has_rw_llb) {						\
		write_c0_lladdr(0);					\
	} else {							\
		if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \
			ll_bit = 0;					\
	}								\
} while (0)
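
/*
 * Clearing the LL bit on every context switch ensures that an sc executed
 * by the incoming task cannot falsely succeed on the strength of an ll
 * performed by the outgoing one. Where the LLAddr register is writable
 * (cpu_has_rw_llb) the hardware bit is cleared directly; otherwise the
 * software ll_bit above is cleared, unless the kernel is built exclusively
 * for CPUs with native ll/sc, in which case the store is optimised away.
 */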

#define switch_to(prev, next, last)					\
do {									\
	u32 __c0_stat;							\
	s32 __fpsave = FP_SAVE_NONE;					\
	__mips_mt_fpaff_switch_to(prev);				\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) {		\
		if (cop2_lazy_restore)					\
			KSTK_STATUS(prev) &= ~ST0_CU2;			\
		__c0_stat = read_c0_status();				\
		write_c0_status(__c0_stat | ST0_CU2);			\
		cop2_save(prev);					\
		write_c0_status(__c0_stat & ~ST0_CU2);			\
	}								\
	__clear_software_ll_bit();					\
	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))		\
		__fpsave = FP_SAVE_SCALAR;				\
	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))		\
		__fpsave = FP_SAVE_VECTOR;				\
	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
	disable_msa();							\
} while (0)
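
/*
 * For reference, the generic scheduler invokes this macro from
 * context_switch() in kernel/sched/core.c, roughly as follows (a
 * simplified sketch, not part of this header):
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	finish_task_switch(prev);
 *
 * The third argument exists because resume() switches stacks: the code
 * after the macro may execute in a different task, so "last" is written
 * with the task that was actually switched away from.
 */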

#define finish_arch_switch(prev)					\
do {									\
	u32 __c0_stat;							\
	if (cop2_present && !cop2_lazy_restore &&			\
	    (KSTK_STATUS(current) & ST0_CU2)) {				\
		__c0_stat = read_c0_status();				\
		write_c0_status(__c0_stat | ST0_CU2);			\
		cop2_restore(current);					\
		write_c0_status(__c0_stat & ~ST0_CU2);			\
	}								\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(current_thread_info()->tp_value);	\
	__restore_watch();						\
} while (0)
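
/*
 * finish_arch_switch() runs after resume() has returned, i.e. on the stack
 * of the newly scheduled task with current == next, which is why the COP2,
 * DSP, userlocal and watch register state above is restored from current
 * rather than taken from an argument.
 */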

#endif /* _ASM_SWITCH_TO_H */