fault.c

/*
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {
	PF_PROT		=	1 << 0,
	PF_WRITE	=	1 << 1,
	PF_USER		=	1 << 2,
	PF_RSVD		=	1 << 3,
	PF_INSTR	=	1 << 4,
};

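/*
 * Example: a user-mode write to a present but read-only page arrives here
 * with PF_PROT|PF_WRITE|PF_USER, i.e. error_code == 7.
 */
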
/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

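/*
 * Note: vector 14 below is X86_TRAP_PF, the page-fault trap. This gives a
 * registered kprobe fault handler a chance to fix up a fault taken while
 * a probe is active.
 */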
static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

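/*
 * Scan at most 15 bytes (the architectural maximum x86 instruction length)
 * starting at the faulting instruction pointer, looking for a prefetch
 * opcode:
 */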
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

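/*
 * For hardware-poison SIGBUS signals, si_addr_lsb tells userspace the
 * granularity of the corruption: PAGE_SHIFT for a regular page, or the
 * corresponding huge page shift when a huge page was poisoned.
 */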
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

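/*
 * Walk every pgd on pgd_list and bring its kernel PMD entries for the
 * vmalloc range in sync with the reference page table, init_mm.pgd. If
 * the kernel PMD is shared (SHARED_KERNEL_PMD) there is nothing to sync.
 */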
void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd)) {
		set_pgd(pgd, *pgd_ref);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
	}

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */
	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

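/*
 * bad_address() returns nonzero if the page-table entry at @p cannot be
 * read safely; dump_pagetable() uses it so that walking a corrupted page
 * table cannot itself take an unhandled fault.
 */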
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

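/*
 * The F00F workaround (set up at boot) aliases the IDT at a read-only
 * fixmap address, which turns the "F0 0F C7 C8" lockup sequence into a
 * page fault on the IDT instead of a hung CPU. IDT entry 6 is the
 * invalid-opcode vector, so deliver the #UD by hand via do_invalid_op():
 */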
static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_has_bug(X86_BUG_F00F)) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current_thread_info()->sig_on_uaccess_error && signal) {
			tsk->thread.trap_nr = X86_TRAP_PF;
			tsk->thread.error_code = error_code | PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address, tsk, 0);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif
		/* Kernel addresses are always protection faults: */
		if (address >= TASK_SIZE)
			error_code |= PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code;
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_nr	= X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}

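/*
 * Dispatch the VM_FAULT_* error bits returned by handle_mm_fault():
 * OOM invokes the OOM killer (or oopses in kernel mode), the SIGBUS and
 * hwpoison cases raise SIGBUS, and anything else is a bug.
 */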
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
		up_read(&current->mm->mmap_sem);
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
}

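/*
 * Check whether the permission bits in the (huge or regular) pte now
 * allow the access that faulted; if they do, the fault was caused by a
 * stale TLB entry rather than by the page tables themselves.
 */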
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
NOKPROBE_SYMBOL(spurious_fault);

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

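/*
 * Example: a PF_WRITE fault against a vma mapped without VM_WRITE (say, a
 * store into a read-only PROT_READ mapping) returns 1 above and ends in
 * SIGSEGV with si_code SEGV_ACCERR.
 */
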
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

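/*
 * With SMAP enabled, a kernel-mode access to a user-space address faults
 * unless EFLAGS.AC was set around the access (as the uaccess stac()/clac()
 * helpers do). So a kernel fault on a user address with AC clear is an
 * SMAP violation:
 */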
static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_X86_SMAP))
		return false;

	if (!static_cpu_has(X86_FEATURE_SMAP))
		return false;

	if (error_code & PF_USER)
		return false;

	if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
		return false;

	return true;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * This function must be noinline because both callers
 * {,trace_}do_page_fault() have notrace on. Making this an actual
 * function guarantees there's a function trace entry.
 */
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (kprobes_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(kprobes_fault(regs)))
		return;

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	if (unlikely(smap_violation(error_code, regs))) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & PF_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in
	 * the kernel and should generate an OOPS. Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space. Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (unlikely((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, fault);
		return;
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}
NOKPROBE_SYMBOL(__do_page_fault);

dotraplinkage void notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = read_cr2(); /* Get the faulting address */
	enum ctx_state prev_state;

	/*
	 * We must have this function tagged with __kprobes, notrace and call
	 * read_cr2() before calling anything else, to avoid calling any kind
	 * of tracing machinery before we've observed the CR2 value.
	 *
	 * exception_{enter,exit}() contain all sorts of tracepoints.
	 */
	prev_state = exception_enter();
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);

#ifdef CONFIG_TRACING
static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
			 unsigned long error_code)
{
	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}

dotraplinkage void notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	/*
	 * The exception_enter and tracepoint processing could
	 * trigger further page faults (user space callchain
	 * reading) and destroy the original cr2 value, so read
	 * the faulting address now.
	 */
	unsigned long address = read_cr2();
	enum ctx_state prev_state;

	prev_state = exception_enter();
	trace_page_fault_entries(address, regs, error_code);
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(trace_do_page_fault);
#endif /* CONFIG_TRACING */