/*
 * extable.c - x86 exception table fixup handlers.
 */
  1. #include <linux/extable.h>
  2. #include <linux/uaccess.h>
  3. #include <linux/sched/debug.h>
  4. #include <asm/fpu/internal.h>
  5. #include <asm/traps.h>
  6. #include <asm/kdebug.h>
  7. typedef bool (*ex_handler_t)(const struct exception_table_entry *,
  8. struct pt_regs *, int);
  9. static inline unsigned long
  10. ex_fixup_addr(const struct exception_table_entry *x)
  11. {
  12. return (unsigned long)&x->fixup + x->fixup;
  13. }
  14. static inline ex_handler_t
  15. ex_fixup_handler(const struct exception_table_entry *x)
  16. {
  17. return (ex_handler_t)((unsigned long)&x->handler + x->handler);
  18. }
  19. bool ex_handler_default(const struct exception_table_entry *fixup,
  20. struct pt_regs *regs, int trapnr)
  21. {
  22. regs->ip = ex_fixup_addr(fixup);
  23. return true;
  24. }
  25. EXPORT_SYMBOL(ex_handler_default);
  26. bool ex_handler_fault(const struct exception_table_entry *fixup,
  27. struct pt_regs *regs, int trapnr)
  28. {
  29. regs->ip = ex_fixup_addr(fixup);
  30. regs->ax = trapnr;
  31. return true;
  32. }
  33. EXPORT_SYMBOL_GPL(ex_handler_fault);
  34. /*
  35. * Handler for UD0 exception following a failed test against the
  36. * result of a refcount inc/dec/add/sub.
  37. */
  38. bool ex_handler_refcount(const struct exception_table_entry *fixup,
  39. struct pt_regs *regs, int trapnr)
  40. {
  41. /* First unconditionally saturate the refcount. */
  42. *(int *)regs->cx = INT_MIN / 2;
  43. /*
  44. * Strictly speaking, this reports the fixup destination, not
  45. * the fault location, and not the actually overflowing
  46. * instruction, which is the instruction before the "js", but
  47. * since that instruction could be a variety of lengths, just
  48. * report the location after the overflow, which should be close
  49. * enough for finding the overflow, as it's at least back in
  50. * the function, having returned from .text.unlikely.
  51. */
  52. regs->ip = ex_fixup_addr(fixup);
  53. /*
  54. * This function has been called because either a negative refcount
  55. * value was seen by any of the refcount functions, or a zero
  56. * refcount value was seen by refcount_dec().
  57. *
  58. * If we crossed from INT_MAX to INT_MIN, OF (Overflow Flag: result
  59. * wrapped around) will be set. Additionally, seeing the refcount
  60. * reach 0 will set ZF (Zero Flag: result was zero). In each of
  61. * these cases we want a report, since it's a boundary condition.
  62. *
  63. */
  64. if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) {
  65. bool zero = regs->flags & X86_EFLAGS_ZF;
  66. refcount_error_report(regs, zero ? "hit zero" : "overflow");
  67. }
  68. return true;
  69. }
  70. EXPORT_SYMBOL_GPL(ex_handler_refcount);
  71. /*
  72. * Handler for when we fail to restore a task's FPU state. We should never get
  73. * here because the FPU state of a task using the FPU (task->thread.fpu.state)
  74. * should always be valid. However, past bugs have allowed userspace to set
  75. * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
  76. * These caused XRSTOR to fail when switching to the task, leaking the FPU
  77. * registers of the task previously executing on the CPU. Mitigate this class
  78. * of vulnerability by restoring from the initial state (essentially, zeroing
  79. * out all the FPU registers) if we can't restore from the task's FPU state.
  80. */
  81. bool ex_handler_fprestore(const struct exception_table_entry *fixup,
  82. struct pt_regs *regs, int trapnr)
  83. {
  84. regs->ip = ex_fixup_addr(fixup);
  85. WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
  86. (void *)instruction_pointer(regs));
  87. __copy_kernel_to_fpregs(&init_fpstate, -1);
  88. return true;
  89. }
  90. EXPORT_SYMBOL_GPL(ex_handler_fprestore);
  91. bool ex_handler_ext(const struct exception_table_entry *fixup,
  92. struct pt_regs *regs, int trapnr)
  93. {
  94. /* Special hack for uaccess_err */
  95. current->thread.uaccess_err = 1;
  96. regs->ip = ex_fixup_addr(fixup);
  97. return true;
  98. }
  99. EXPORT_SYMBOL(ex_handler_ext);
  100. bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
  101. struct pt_regs *regs, int trapnr)
  102. {
  103. if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pF)\n",
  104. (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
  105. show_stack_regs(regs);
  106. /* Pretend that the read succeeded and returned 0. */
  107. regs->ip = ex_fixup_addr(fixup);
  108. regs->ax = 0;
  109. regs->dx = 0;
  110. return true;
  111. }
  112. EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
  113. bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
  114. struct pt_regs *regs, int trapnr)
  115. {
  116. if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pF)\n",
  117. (unsigned int)regs->cx, (unsigned int)regs->dx,
  118. (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
  119. show_stack_regs(regs);
  120. /* Pretend that the write succeeded. */
  121. regs->ip = ex_fixup_addr(fixup);
  122. return true;
  123. }
  124. EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
  125. bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
  126. struct pt_regs *regs, int trapnr)
  127. {
  128. if (static_cpu_has(X86_BUG_NULL_SEG))
  129. asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
  130. asm volatile ("mov %0, %%fs" : : "rm" (0));
  131. return ex_handler_default(fixup, regs, trapnr);
  132. }
  133. EXPORT_SYMBOL(ex_handler_clear_fs);
  134. bool ex_has_fault_handler(unsigned long ip)
  135. {
  136. const struct exception_table_entry *e;
  137. ex_handler_t handler;
  138. e = search_exception_tables(ip);
  139. if (!e)
  140. return false;
  141. handler = ex_fixup_handler(e);
  142. return handler == ex_handler_fault;
  143. }
  144. int fixup_exception(struct pt_regs *regs, int trapnr)
  145. {
  146. const struct exception_table_entry *e;
  147. ex_handler_t handler;
  148. #ifdef CONFIG_PNPBIOS
  149. if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
  150. extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
  151. extern u32 pnp_bios_is_utter_crap;
  152. pnp_bios_is_utter_crap = 1;
  153. printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
  154. __asm__ volatile(
  155. "movl %0, %%esp\n\t"
  156. "jmp *%1\n\t"
  157. : : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
  158. panic("do_trap: can't hit this");
  159. }
  160. #endif
  161. e = search_exception_tables(regs->ip);
  162. if (!e)
  163. return 0;
  164. handler = ex_fixup_handler(e);
  165. return handler(e, regs, trapnr);
  166. }
  167. extern unsigned int early_recursion_flag;
  168. /* Restricted version used during very early boot */
  169. void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
  170. {
  171. /* Ignore early NMIs. */
  172. if (trapnr == X86_TRAP_NMI)
  173. return;
  174. if (early_recursion_flag > 2)
  175. goto halt_loop;
  176. /*
  177. * Old CPUs leave the high bits of CS on the stack
  178. * undefined. I'm not sure which CPUs do this, but at least
  179. * the 486 DX works this way.
  180. */
  181. if (regs->cs != __KERNEL_CS)
  182. goto fail;
  183. /*
  184. * The full exception fixup machinery is available as soon as
  185. * the early IDT is loaded. This means that it is the
  186. * responsibility of extable users to either function correctly
  187. * when handlers are invoked early or to simply avoid causing
  188. * exceptions before they're ready to handle them.
  189. *
  190. * This is better than filtering which handlers can be used,
  191. * because refusing to call a handler here is guaranteed to
  192. * result in a hard-to-debug panic.
  193. *
  194. * Keep in mind that not all vectors actually get here. Early
  195. * fage faults, for example, are special.
  196. */
  197. if (fixup_exception(regs, trapnr))
  198. return;
  199. if (fixup_bug(regs, trapnr))
  200. return;
  201. fail:
  202. early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
  203. (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
  204. regs->orig_ax, read_cr2());
  205. show_regs(regs);
  206. halt_loop:
  207. while (true)
  208. halt();
  209. }