/*
 * Exception table fixup handlers for x86 (arch/x86/mm/extable.c).
 */
  1. #include <linux/extable.h>
  2. #include <linux/uaccess.h>
  3. #include <linux/sched/debug.h>
  4. #include <xen/xen.h>
  5. #include <asm/fpu/internal.h>
  6. #include <asm/traps.h>
  7. #include <asm/kdebug.h>
/*
 * Signature of an exception-table fixup handler.  Handlers receive the
 * matched extable entry, the faulting register state, the trap vector
 * number, the hardware error code and the fault address, and return
 * true when the exception has been fixed up (execution resumes at the
 * adjusted regs->ip).
 */
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
			     struct pt_regs *, int, unsigned long,
			     unsigned long);
  11. static inline unsigned long
  12. ex_fixup_addr(const struct exception_table_entry *x)
  13. {
  14. return (unsigned long)&x->fixup + x->fixup;
  15. }
  16. static inline ex_handler_t
  17. ex_fixup_handler(const struct exception_table_entry *x)
  18. {
  19. return (ex_handler_t)((unsigned long)&x->handler + x->handler);
  20. }
  21. __visible bool ex_handler_default(const struct exception_table_entry *fixup,
  22. struct pt_regs *regs, int trapnr,
  23. unsigned long error_code,
  24. unsigned long fault_addr)
  25. {
  26. regs->ip = ex_fixup_addr(fixup);
  27. return true;
  28. }
  29. EXPORT_SYMBOL(ex_handler_default);
  30. __visible bool ex_handler_fault(const struct exception_table_entry *fixup,
  31. struct pt_regs *regs, int trapnr,
  32. unsigned long error_code,
  33. unsigned long fault_addr)
  34. {
  35. regs->ip = ex_fixup_addr(fixup);
  36. regs->ax = trapnr;
  37. return true;
  38. }
  39. EXPORT_SYMBOL_GPL(ex_handler_fault);
  40. /*
  41. * Handler for UD0 exception following a failed test against the
  42. * result of a refcount inc/dec/add/sub.
  43. */
  44. __visible bool ex_handler_refcount(const struct exception_table_entry *fixup,
  45. struct pt_regs *regs, int trapnr,
  46. unsigned long error_code,
  47. unsigned long fault_addr)
  48. {
  49. /* First unconditionally saturate the refcount. */
  50. *(int *)regs->cx = INT_MIN / 2;
  51. /*
  52. * Strictly speaking, this reports the fixup destination, not
  53. * the fault location, and not the actually overflowing
  54. * instruction, which is the instruction before the "js", but
  55. * since that instruction could be a variety of lengths, just
  56. * report the location after the overflow, which should be close
  57. * enough for finding the overflow, as it's at least back in
  58. * the function, having returned from .text.unlikely.
  59. */
  60. regs->ip = ex_fixup_addr(fixup);
  61. /*
  62. * This function has been called because either a negative refcount
  63. * value was seen by any of the refcount functions, or a zero
  64. * refcount value was seen by refcount_dec().
  65. *
  66. * If we crossed from INT_MAX to INT_MIN, OF (Overflow Flag: result
  67. * wrapped around) will be set. Additionally, seeing the refcount
  68. * reach 0 will set ZF (Zero Flag: result was zero). In each of
  69. * these cases we want a report, since it's a boundary condition.
  70. * The SF case is not reported since it indicates post-boundary
  71. * manipulations below zero or above INT_MAX. And if none of the
  72. * flags are set, something has gone very wrong, so report it.
  73. */
  74. if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) {
  75. bool zero = regs->flags & X86_EFLAGS_ZF;
  76. refcount_error_report(regs, zero ? "hit zero" : "overflow");
  77. } else if ((regs->flags & X86_EFLAGS_SF) == 0) {
  78. /* Report if none of OF, ZF, nor SF are set. */
  79. refcount_error_report(regs, "unexpected saturation");
  80. }
  81. return true;
  82. }
  83. EXPORT_SYMBOL(ex_handler_refcount);
/*
 * Handler for when we fail to restore a task's FPU state. We should never get
 * here because the FPU state of a task using the FPU (task->thread.fpu.state)
 * should always be valid. However, past bugs have allowed userspace to set
 * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
 * These caused XRSTOR to fail when switching to the task, leaking the FPU
 * registers of the task previously executing on the CPU. Mitigate this class
 * of vulnerability by restoring from the initial state (essentially, zeroing
 * out all the FPU registers) if we can't restore from the task's FPU state.
 */
__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
				    struct pt_regs *regs, int trapnr,
				    unsigned long error_code,
				    unsigned long fault_addr)
{
	/*
	 * Resume at the fixup location.  Note the assignment happens
	 * before the WARN below, so the reported address is the fixup
	 * destination, not the faulting instruction.
	 */
	regs->ip = ex_fixup_addr(fixup);

	WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
		  (void *)instruction_pointer(regs));

	/* Restore all state components from the clean initial state. */
	__copy_kernel_to_fpregs(&init_fpstate, -1);
	return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
/*
 * Helper to check whether a uaccess fault indicates a kernel bug.
 * Returns true when the fixup should be REFUSED so the fault escalates
 * to die(); false when the fixup may proceed normally.  The order of
 * the checks below is significant: legitimate cases bail out before
 * the pr_emerg() reporting at the end.
 */
static bool bogus_uaccess(struct pt_regs *regs, int trapnr,
			  unsigned long fault_addr)
{
	/* This is the normal case: #PF with a fault address in userspace. */
	if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX)
		return false;

	/*
	 * This code can be reached for machine checks, but only if the #MC
	 * handler has already decided that it looks like a candidate for fixup.
	 * This e.g. happens when attempting to access userspace memory which
	 * the CPU can't access because of uncorrectable bad memory.
	 */
	if (trapnr == X86_TRAP_MC)
		return false;

	/*
	 * There are two remaining exception types we might encounter here:
	 *  - #PF for faulting accesses to kernel addresses
	 *  - #GP for faulting accesses to noncanonical addresses
	 * Complain about anything else.
	 */
	if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) {
		WARN(1, "unexpected trap %d in uaccess\n", trapnr);
		return false;
	}

	/*
	 * This is a faulting memory access in kernel space, on a kernel
	 * address, in a usercopy function. This can e.g. be caused by improper
	 * use of helpers like __put_user and by improper attempts to access
	 * userspace addresses in KERNEL_DS regions.
	 * The one (semi-)legitimate exception are probe_kernel_{read,write}(),
	 * which can be invoked from places like kgdb, /dev/mem (for reading)
	 * and privileged BPF code (for reading).
	 * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag
	 * to tell us that faulting on kernel addresses, and even noncanonical
	 * addresses, in a userspace accessor does not necessarily imply a
	 * kernel bug, root might just be doing weird stuff.
	 */
	if (current->kernel_uaccess_faults_ok)
		return false;

	/* This is bad. Refuse the fixup so that we go into die(). */
	if (trapnr == X86_TRAP_PF) {
		pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n",
			 fault_addr);
	} else {
		pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n");
	}
	return true;
}
  155. __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
  156. struct pt_regs *regs, int trapnr,
  157. unsigned long error_code,
  158. unsigned long fault_addr)
  159. {
  160. if (bogus_uaccess(regs, trapnr, fault_addr))
  161. return false;
  162. regs->ip = ex_fixup_addr(fixup);
  163. return true;
  164. }
  165. EXPORT_SYMBOL(ex_handler_uaccess);
  166. __visible bool ex_handler_ext(const struct exception_table_entry *fixup,
  167. struct pt_regs *regs, int trapnr,
  168. unsigned long error_code,
  169. unsigned long fault_addr)
  170. {
  171. if (bogus_uaccess(regs, trapnr, fault_addr))
  172. return false;
  173. /* Special hack for uaccess_err */
  174. current->thread.uaccess_err = 1;
  175. regs->ip = ex_fixup_addr(fixup);
  176. return true;
  177. }
  178. EXPORT_SYMBOL(ex_handler_ext);
  179. __visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
  180. struct pt_regs *regs, int trapnr,
  181. unsigned long error_code,
  182. unsigned long fault_addr)
  183. {
  184. if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pF)\n",
  185. (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
  186. show_stack_regs(regs);
  187. /* Pretend that the read succeeded and returned 0. */
  188. regs->ip = ex_fixup_addr(fixup);
  189. regs->ax = 0;
  190. regs->dx = 0;
  191. return true;
  192. }
  193. EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
  194. __visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
  195. struct pt_regs *regs, int trapnr,
  196. unsigned long error_code,
  197. unsigned long fault_addr)
  198. {
  199. if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pF)\n",
  200. (unsigned int)regs->cx, (unsigned int)regs->dx,
  201. (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
  202. show_stack_regs(regs);
  203. /* Pretend that the write succeeded. */
  204. regs->ip = ex_fixup_addr(fixup);
  205. return true;
  206. }
  207. EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
/*
 * Fixup that clears %fs before resuming at the fixup address.
 */
__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
				   struct pt_regs *regs, int trapnr,
				   unsigned long error_code,
				   unsigned long fault_addr)
{
	/*
	 * On CPUs with X86_BUG_NULL_SEG, load __USER_DS first before
	 * loading the NULL selector (presumably because writing a NULL
	 * selector alone does not fully reset the segment on those
	 * CPUs — NOTE(review): confirm against the bug's description).
	 */
	if (static_cpu_has(X86_BUG_NULL_SEG))
		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
	asm volatile ("mov %0, %%fs" : : "rm" (0));
	return ex_handler_default(fixup, regs, trapnr, error_code, fault_addr);
}
EXPORT_SYMBOL(ex_handler_clear_fs);
  219. __visible bool ex_has_fault_handler(unsigned long ip)
  220. {
  221. const struct exception_table_entry *e;
  222. ex_handler_t handler;
  223. e = search_exception_tables(ip);
  224. if (!e)
  225. return false;
  226. handler = ex_fixup_handler(e);
  227. return handler == ex_handler_fault;
  228. }
/*
 * Look up regs->ip in the exception tables and, if an entry exists,
 * dispatch to its fixup handler.  Returns the handler's result (nonzero
 * when the exception was fixed up), or 0 when no entry matches.
 */
int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
		    unsigned long fault_addr)
{
	const struct exception_table_entry *e;
	ex_handler_t handler;

#ifdef CONFIG_PNPBIOS
	/*
	 * Faults inside PNP BIOS code: abandon the BIOS call by
	 * switching back to the saved kernel stack and jumping to the
	 * saved recovery IP; this never returns here.
	 */
	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
		extern u32 pnp_bios_is_utter_crap;
		pnp_bios_is_utter_crap = 1;
		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
		__asm__ volatile(
			"movl %0, %%esp\n\t"
			"jmp *%1\n\t"
			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
		panic("do_trap: can't hit this");
	}
#endif

	e = search_exception_tables(regs->ip);
	if (!e)
		return 0;

	handler = ex_fixup_handler(e);
	return handler(e, regs, trapnr, error_code, fault_addr);
}
/* Set by the early trap entry code; counts nested early exceptions. */
extern unsigned int early_recursion_flag;

/* Restricted version used during very early boot */
void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
{
	/* Ignore early NMIs. */
	if (trapnr == X86_TRAP_NMI)
		return;

	/* Too many nested early exceptions: give up and halt. */
	if (early_recursion_flag > 2)
		goto halt_loop;

	/*
	 * Old CPUs leave the high bits of CS on the stack
	 * undefined. I'm not sure which CPUs do this, but at least
	 * the 486 DX works this way.
	 * Xen pv domains are not using the default __KERNEL_CS.
	 */
	if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
		goto fail;

	/*
	 * The full exception fixup machinery is available as soon as
	 * the early IDT is loaded. This means that it is the
	 * responsibility of extable users to either function correctly
	 * when handlers are invoked early or to simply avoid causing
	 * exceptions before they're ready to handle them.
	 *
	 * This is better than filtering which handlers can be used,
	 * because refusing to call a handler here is guaranteed to
	 * result in a hard-to-debug panic.
	 *
	 * Keep in mind that not all vectors actually get here. Early
	 * page faults, for example, are special.
	 */
	if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
		return;

	/* Fall back to WARN/BUG decoding (e.g. ud2-based bugs). */
	if (fixup_bug(regs, trapnr))
		return;

fail:
	early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
		     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
		     regs->orig_ax, read_cr2());
	show_regs(regs);

halt_loop:
	while (true)
		halt();
}