dsemul.c 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/err.h>
  3. #include <linux/slab.h>
  4. #include <linux/mm_types.h>
  5. #include <linux/sched/task.h>
  6. #include <asm/branch.h>
  7. #include <asm/cacheflush.h>
  8. #include <asm/fpu_emulator.h>
  9. #include <asm/inst.h>
  10. #include <asm/mipsregs.h>
  11. #include <linux/uaccess.h>
  12. /**
  13. * struct emuframe - The 'emulation' frame structure
  14. * @emul: The instruction to 'emulate'.
  15. * @badinst: A break instruction to cause a return to the kernel.
  16. *
  17. * This structure defines the frames placed within the delay slot emulation
  18. * page in response to a call to mips_dsemul(). Each thread may be allocated
  19. * only one frame at any given time. The kernel stores within it the
  20. * instruction to be 'emulated' followed by a break instruction, then
  21. * executes the frame in user mode. The break causes a trap to the kernel
  22. * which leads to do_dsemulret() being called unless the instruction in
  23. * @emul causes a trap itself, is a branch, or a signal is delivered to
  24. * the thread. In these cases the allocated frame will either be reused by
  25. * a subsequent delay slot 'emulation', or be freed during signal delivery or
  26. * upon thread exit.
  27. *
  28. * This approach is used because:
  29. *
  30. * - Actually emulating all instructions isn't feasible. We would need to
  31. * be able to handle instructions from all revisions of the MIPS ISA,
  32. * all ASEs & all vendor instruction set extensions. This would be a
  33. * whole lot of work & continual maintenance burden as new instructions
  34. * are introduced, and in the case of some vendor extensions may not
  35. * even be possible. Thus we need to take the approach of actually
  36. * executing the instruction.
  37. *
  38. * - We must execute the instruction within user context. If we were to
  39. * execute the instruction in kernel mode then it would have access to
  40. * kernel resources without very careful checks, leaving us with a
  41. * high potential for security or stability issues to arise.
  42. *
* - We used to place the frame on the user's stack, but this requires
  44. * that the stack be executable. This is bad for security so the
  45. * per-process page is now used instead.
  46. *
  47. * - The instruction in @emul may be something entirely invalid for a
  48. * delay slot. The user may (intentionally or otherwise) place a branch
  49. * in a delay slot, or a kernel mode instruction, or something else
  50. * which generates an exception. Thus we can't rely upon the break in
  51. * @badinst always being hit. For this reason we track the index of the
  52. * frame allocated to each thread, allowing us to clean it up at later
  53. * points such as signal delivery or thread exit.
  54. *
  55. * - The user may generate a fake struct emuframe if they wish, invoking
  56. * the BRK_MEMU break instruction themselves. We must therefore not
  57. * trust that BRK_MEMU means there's actually a valid frame allocated
  58. * to the thread, and must not allow the user to do anything they
  59. * couldn't already.
  60. */
struct emuframe {
	mips_instruction emul;		/* the delay slot instruction to run */
	mips_instruction badinst;	/* BRK_MEMU break, traps back to the kernel */
};
/* Number of struct emuframe slots that fit within the emulation page */
static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
  66. static inline __user struct emuframe *dsemul_page(void)
  67. {
  68. return (__user struct emuframe *)STACK_TOP;
  69. }
/*
 * alloc_emuframe() - Allocate a frame index within the emulation page for
 * the current thread, creating the mm's allocation bitmap on first use.
 *
 * Returns the allocated frame index, or BD_EMUFRAME_NONE if allocation
 * failed (out of memory, or a fatal signal arrived while waiting for a
 * free frame). May sleep waiting for another thread to free a frame.
 */
static int alloc_emuframe(void)
{
	mm_context_t *mm_ctx = &current->mm->context;
	int idx;

retry:
	spin_lock(&mm_ctx->bd_emupage_lock);

	/* Ensure we have an allocation bitmap */
	if (!mm_ctx->bd_emupage_allocmap) {
		/* GFP_ATOMIC: we hold bd_emupage_lock so we must not sleep */
		mm_ctx->bd_emupage_allocmap =
			kcalloc(BITS_TO_LONGS(emupage_frame_count),
				sizeof(unsigned long),
				GFP_ATOMIC);

		if (!mm_ctx->bd_emupage_allocmap) {
			idx = BD_EMUFRAME_NONE;
			goto out_unlock;
		}
	}

	/* Attempt to allocate a single bit/frame */
	idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap,
				      emupage_frame_count, 0);
	if (idx < 0) {
		/*
		 * Failed to allocate a frame. We'll wait until one becomes
		 * available. We unlock the page so that other threads actually
		 * get the opportunity to free their frames, which means
		 * technically the result of bitmap_full may be incorrect.
		 * However the worst case is that we repeat all this and end up
		 * back here again.
		 */
		spin_unlock(&mm_ctx->bd_emupage_lock);

		/*
		 * wait_event_killable() returns 0 when woken normally (retry
		 * the allocation) and non-zero when a fatal signal is pending.
		 */
		if (!wait_event_killable(mm_ctx->bd_emupage_queue,
					 !bitmap_full(mm_ctx->bd_emupage_allocmap,
						      emupage_frame_count)))
			goto retry;

		/* Received a fatal signal - just give in */
		return BD_EMUFRAME_NONE;
	}

	/* Success! */
	pr_debug("allocate emuframe %d to %d\n", idx, current->pid);

out_unlock:
	spin_unlock(&mm_ctx->bd_emupage_lock);
	return idx;
}
/*
 * free_emuframe() - Release frame @idx back to @mm's emulation page,
 * waking any thread blocked in alloc_emuframe() waiting for a free frame.
 */
static void free_emuframe(int idx, struct mm_struct *mm)
{
	mm_context_t *mm_ctx = &mm->context;

	spin_lock(&mm_ctx->bd_emupage_lock);

	pr_debug("free emuframe %d from %d\n", idx, current->pid);
	bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1);

	/* If some thread is waiting for a frame, now's its chance */
	wake_up(&mm_ctx->bd_emupage_queue);

	spin_unlock(&mm_ctx->bd_emupage_lock);
}
  123. static bool within_emuframe(struct pt_regs *regs)
  124. {
  125. unsigned long base = (unsigned long)dsemul_page();
  126. if (regs->cp0_epc < base)
  127. return false;
  128. if (regs->cp0_epc >= (base + PAGE_SIZE))
  129. return false;
  130. return true;
  131. }
/*
 * dsemul_thread_cleanup() - Free any emulation frame allocated to @tsk.
 *
 * atomic_xchg() both retrieves the frame index and clears it, so a
 * concurrent observer never sees a stale index. task_lock() stabilises
 * tsk->mm, which may already be NULL if the task is exiting.
 *
 * Returns true if a frame was allocated & has been released, else false.
 */
bool dsemul_thread_cleanup(struct task_struct *tsk)
{
	int fr_idx;

	/* Clear any allocated frame, retrieving its index */
	fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);

	/* If no frame was allocated, we're done */
	if (fr_idx == BD_EMUFRAME_NONE)
		return false;

	task_lock(tsk);

	/* Free the frame that this thread had allocated */
	if (tsk->mm)
		free_emuframe(fr_idx, tsk->mm);

	task_unlock(tsk);
	return true;
}
/*
 * dsemul_thread_rollback() - Move the current thread out of its emulation
 * frame, fixing up its PC, and free the frame.
 *
 * Returns true if the thread was executing from a frame (which has now
 * been rolled back & freed), false if there was nothing to do.
 */
bool dsemul_thread_rollback(struct pt_regs *regs)
{
	struct emuframe __user *fr;
	int fr_idx;

	/* Do nothing if we're not executing from a frame */
	if (!within_emuframe(regs))
		return false;

	/* Find the frame being executed */
	fr_idx = atomic_read(&current->thread.bd_emu_frame);
	if (fr_idx == BD_EMUFRAME_NONE)
		return false;
	fr = &dsemul_page()[fr_idx];

	/*
	 * If the PC is at the emul instruction, roll back to the branch. If
	 * PC is at the badinst (break) instruction, we've already emulated the
	 * instruction so progress to the continue PC. If it's anything else
	 * then something is amiss & the user has branched into some other area
	 * of the emupage - we'll free the allocated frame anyway.
	 */
	if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul)
		regs->cp0_epc = current->thread.bd_emu_branch_pc;
	else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst)
		regs->cp0_epc = current->thread.bd_emu_cont_pc;

	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
	free_emuframe(fr_idx, current->mm);
	return true;
}
  174. void dsemul_mm_cleanup(struct mm_struct *mm)
  175. {
  176. mm_context_t *mm_ctx = &mm->context;
  177. kfree(mm_ctx->bd_emupage_allocmap);
  178. }
/*
 * mips_dsemul() - Arrange for the delay slot instruction @ir to be executed
 * in user mode via a frame in the emulation page.
 *
 * @regs:	User register context.
 * @ir:		The delay slot instruction.
 * @branch_pc:	PC of the branch whose delay slot @ir occupies.
 * @cont_pc:	PC to continue from once @ir has been executed.
 *
 * Returns -1 if the instruction was handled in-kernel without a frame
 * (NOP, NOP16 or ADDIUPC), SIGBUS if a frame could not be allocated or
 * written, or 0 if user context was redirected to execute the frame.
 */
int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
		unsigned long branch_pc, unsigned long cont_pc)
{
	int isa16 = get_isa16_mode(regs->cp0_epc);
	mips_instruction break_math;
	struct emuframe __user *fr;
	int err, fr_idx;

	/* NOP is easy */
	if (ir == 0)
		return -1;

	/* microMIPS instructions */
	if (isa16) {
		union mips_instruction insn = { .word = ir };

		/* NOP16 aka MOVE16 $0, $0 */
		if ((ir >> 16) == MM_NOP16)
			return -1;

		/* ADDIUPC: PC-relative, so emulate directly rather than
		 * executing it from the frame where PC would be wrong. */
		if (insn.mm_a_format.opcode == mm_addiupc_op) {
			unsigned int rs;
			s32 v;

			/* Decode the 3-bit rs field to a full register number */
			rs = (((insn.mm_a_format.rs + 0xe) & 0xf) + 2);
			v = regs->cp0_epc & ~3;
			v += insn.mm_a_format.simmediate << 2;
			regs->regs[rs] = (long)v;
			return -1;
		}
	}

	pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc);

	/* Allocate a frame if we don't already have one */
	fr_idx = atomic_read(&current->thread.bd_emu_frame);
	if (fr_idx == BD_EMUFRAME_NONE)
		fr_idx = alloc_emuframe();
	if (fr_idx == BD_EMUFRAME_NONE)
		return SIGBUS;
	fr = &dsemul_page()[fr_idx];

	/* Retrieve the appropriately encoded break instruction */
	break_math = BREAK_MATH(isa16);

	/* Write the instructions to the frame */
	if (isa16) {
		/* microMIPS: store each 32-bit word as two halfwords */
		err = __put_user(ir >> 16,
				 (u16 __user *)(&fr->emul));
		err |= __put_user(ir & 0xffff,
				  (u16 __user *)((long)(&fr->emul) + 2));
		err |= __put_user(break_math >> 16,
				  (u16 __user *)(&fr->badinst));
		err |= __put_user(break_math & 0xffff,
				  (u16 __user *)((long)(&fr->badinst) + 2));
	} else {
		err = __put_user(ir, &fr->emul);
		err |= __put_user(break_math, &fr->badinst);
	}

	if (unlikely(err)) {
		MIPS_FPU_EMU_INC_STATS(errors);
		free_emuframe(fr_idx, current->mm);
		return SIGBUS;
	}

	/* Record the PC of the branch, PC to continue from & frame index */
	current->thread.bd_emu_branch_pc = branch_pc;
	current->thread.bd_emu_cont_pc = cont_pc;
	atomic_set(&current->thread.bd_emu_frame, fr_idx);

	/* Change user register context to execute the frame */
	regs->cp0_epc = (unsigned long)&fr->emul | isa16;

	/* Ensure the icache observes our newly written frame */
	flush_cache_sigtramp((unsigned long)&fr->emul);

	return 0;
}
  245. bool do_dsemulret(struct pt_regs *xcp)
  246. {
  247. /* Cleanup the allocated frame, returning if there wasn't one */
  248. if (!dsemul_thread_cleanup(current)) {
  249. MIPS_FPU_EMU_INC_STATS(errors);
  250. return false;
  251. }
  252. /* Set EPC to return to post-branch instruction */
  253. xcp->cp0_epc = current->thread.bd_emu_cont_pc;
  254. pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
  255. MIPS_FPU_EMU_INC_STATS(ds_emul);
  256. return true;
  257. }