/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/

#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
#include <asm/mmu_context.h>		/* vma_pkey()			*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 *   bit 5 ==				1: protection keys block access
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
	PF_PK		=		1 << 5,
};

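/*
 * Example decodings: error_code == 0x7 (PF_PROT|PF_WRITE|PF_USER) is a
 * user-mode write to a present but write-protected page, the classic
 * copy-on-write case; error_code == 0x2 (PF_WRITE) is a kernel-mode
 * write to a not-present page.
 */
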
/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present, so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * We need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

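/*
 * Heuristically decide whether the fault came from an AMD prefetch
 * instruction. For reference: PREFETCH/PREFETCHW encode as 0F 0D /r
 * and PREFETCHNTA/T0/T1/T2 as 0F 18 /r, optionally preceded by the
 * prefix bytes accepted by check_prefetch_opcode() above.
 */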
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

/*
 * A protection key fault means that the PKRU value did not allow
 * access to some PTE.  Userspace can figure out what PKRU was
 * from the XSAVE state, and this function fills out a field in
 * siginfo so userspace can discover which protection key was set
 * on the PTE.
 *
 * If we get here, we know that the hardware signaled a PF_PK
 * fault and that there was a VMA once we got in the fault
 * handler.  It does *not* guarantee that the VMA we find here
 * was the one that we faulted on.
 *
 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 * 3. T1   : faults...
 * 4. T2   : mprotect_key(foo, PAGE_SIZE, pkey=5);
 * 5. T1   : enters fault handler, takes mmap_sem, etc...
 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 *	     faulted on a pte with its pkey=4.
 */
static void fill_sig_info_pkey(int si_code, siginfo_t *info,
		struct vm_area_struct *vma)
{
	/* This is effectively an #ifdef */
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return;

	/* Fault not from Protection Keys: nothing to do */
	if (si_code != SEGV_PKUERR)
		return;
	/*
	 * force_sig_info_fault() is called from a number of
	 * contexts, some of which have a VMA and some of which
	 * do not.  The PF_PK handling happens after we have a
	 * valid VMA, so we should never reach this without a
	 * valid VMA.
	 */
	if (!vma) {
		WARN_ONCE(1, "PKU fault with no VMA passed in");
		info->si_pkey = 0;
		return;
	}
	/*
	 * si_pkey should be thought of as a strong hint, but not
	 * absolutely guaranteed to be 100% accurate because of
	 * the race explained above.
	 */
	info->si_pkey = vma_pkey(vma);
}

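/*
 * si_addr_lsb tells the signal handler how coarse the reported fault
 * address is: PAGE_SHIFT for an ordinary poisoned page, or the huge
 * page shift when the poisoned mapping was a huge page.
 */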
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, struct vm_area_struct *vma,
		     int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	fill_sig_info_pkey(si_code, &info, vma);

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
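/*
 * Copy the kernel PMD entry covering 'address' from the init_mm
 * reference page tables into the page table rooted at 'pgd'.
 * Returns init_mm's pmd when a mapping exists there, NULL otherwise.
 */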
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
#ifdef CONFIG_VM86
	unsigned long bit;

	if (!v8086_mode(regs) || !tsk->thread.vm86)
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
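	/* One bit per 4K page of the VGA window at 0xA0000; 32 bits cover 128K. */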
	if (bit < 32)
		tsk->thread.vm86->screen_bitmap |= 1 << bit;
#endif
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

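/*
 * 64-bit: propagate any new kernel PGD entries for the vmalloc range
 * into every per-process page table via sync_global_pgds(), so later
 * faults in that range find them already present.
 */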
void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd)) {
		set_pgd(pgd, *pgd_ref);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
	}

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
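
	/*
	 * e.g. a truncated kernel-text RIP of 0x81001234 becomes
	 * 0xffffffff81001234 here, and is accepted below only if it
	 * lands in kernel text or the module area.
	 */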
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_has_bug(X86_BUG_F00F)) {
		nr = (address - idt_descr.address) >> 3;
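
		/*
		 * IDT gates are 8 bytes here, so 'nr' is the vector
		 * whose descriptor the CPU was fetching; vector 6 is
		 * #UD, the exception the buggy instruction raises.
		 */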
		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
static const char smep_warning[] = KERN_CRIT
"unable to execute userspace code (SMEP?) (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
		if (pte && pte_present(*pte) && pte_exec(*pte) &&
				(pgd_flags(*pgd) & _PAGE_USER) &&
				(__read_cr4() & X86_CR4_SMEP))
			printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long flags;
	int sig;
	/* No context means no VMA to pass down */
	struct vm_area_struct *vma = NULL;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current_thread_info()->sig_on_uaccess_error && signal) {
			tsk->thread.trap_nr = X86_TRAP_PF;
			tsk->thread.error_code = error_code | PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address,
					     tsk, vma, 0);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	if (task_stack_end_corrupted(tsk))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, struct vm_area_struct *vma,
		       int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif

		/* Kernel addresses are always protection faults: */
		if (address >= TASK_SIZE)
			error_code |= PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code;
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address, struct vm_area_struct *vma)
{
	__bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, struct vm_area_struct *vma, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, vma, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
}

static inline bool bad_area_access_from_pkeys(unsigned long error_code,
		struct vm_area_struct *vma)
{
	/* This code is always called on the current mm */
	bool foreign = false;

	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return false;
	if (error_code & PF_PK)
		return true;
	/* this checks permission keys on the VMA: */
	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
				(error_code & PF_INSTR), foreign))
		return true;
	return false;
}

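/*
 * SEGV_PKUERR tells userspace that protection keys denied the access,
 * so it knows to go look at PKRU; ordinary permission failures stay
 * SEGV_ACCERR.
 */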
static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
	/*
	 * This OSPKE check is not strictly necessary at runtime.
	 * But, doing it this way allows compiler optimizations
	 * if pkeys are compiled out.
	 */
	if (bad_area_access_from_pkeys(error_code, vma))
		__bad_area(regs, error_code, address, vma, SEGV_PKUERR);
	else
		__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  struct vm_area_struct *vma, unsigned int fault)
{
	struct task_struct *tsk = current;
	int code = BUS_ADRERR;

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_nr	= X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, struct vm_area_struct *vma,
	       unsigned int fault)
{
	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, vma, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area_nosemaphore(regs, error_code, address, vma);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;
	/*
	 * Note: We do not do lazy flushing on protection key
	 * changes, so no spurious fault will ever set PF_PK.
	 */
	if ((error_code & PF_PK))
		return 1;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permissions than the page table entry.  Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
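 * Example: the kernel upgrades a page from RO to RW; a CPU that still
 * caches the RO translation faults on its next write. By the time we
 * get here the PTE already permits the write, so we simply return;
 * delivery of the fault itself invalidated the stale TLB entry.
 *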
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/*
	 * Only writes to RO or instruction fetches from NX may cause
	 * spurious faults.
	 *
	 * These could be from user or supervisor accesses but the TLB
	 * is only lazily flushed after a kernel mapping protection
	 * change, so user accesses are not expected to cause spurious
	 * faults.
	 */
	if (error_code != (PF_WRITE | PF_PROT)
	    && error_code != (PF_INSTR | PF_PROT))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
NOKPROBE_SYMBOL(spurious_fault);

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	/* This is only called for the current mm, so: */
	bool foreign = false;

	/*
	 * Access or read was blocked by protection keys.  We do
	 * this check before any others because we do not want
	 * to, for instance, confuse a protection-key-denied
	 * write with one for which we should do a COW.
	 */
	if (error_code & PF_PK)
		return 1;

	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a PF_PK as soon as we fill in a
	 * page.
	 */
	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
				(error_code & PF_INSTR), foreign))
		return 1;

	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_X86_SMAP))
		return false;

	if (!static_cpu_has(X86_FEATURE_SMAP))
		return false;

	if (error_code & PF_USER)
		return false;
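
	/*
	 * Kernel code may legitimately touch user memory inside
	 * stac()/clac() sections, which set EFLAGS.AC; with AC set
	 * the access is not an SMAP violation.
	 */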
	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
		return false;

	return true;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * This function must have noinline because both callers
 * {,trace_}do_page_fault() have notrace on. Having this an actual function
 * guarantees there's a function trace entry.
 */
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, major = 0;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (kprobes_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address, NULL);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(kprobes_fault(regs)))
		return;

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	if (unlikely(smap_violation(error_code, regs))) {
		bad_area_nosemaphore(regs, error_code, address, NULL);
		return;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address, NULL);
		return;
	}

	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & PF_WRITE)
		flags |= FAULT_FLAG_WRITE;
	if (error_code & PF_INSTR)
		flags |= FAULT_FLAG_INSTRUCTION;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address, NULL);
			return;
		}
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address, vma);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	major |= fault & VM_FAULT_MAJOR;

	/*
	 * If we need to retry the mmap_sem has already been released,
	 * and if there is a fatal signal pending there is no guarantee
	 * that we made any progress. Handle this case first.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* Retry at most once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(tsk))
				goto retry;
		}

		/* User mode? Just return to handle the fatal exception */
		if (flags & FAULT_FLAG_USER)
			return;

		/* Not returning to user mode? Handle exceptions or die: */
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	up_read(&mm->mmap_sem);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, vma, fault);
		return;
	}

	/*
	 * Major/minor page fault accounting. If any of the events
	 * returned VM_FAULT_MAJOR, we account it as a major fault.
	 */
	if (major) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}

	check_v8086_mode(regs, address, tsk);
}
NOKPROBE_SYMBOL(__do_page_fault);

dotraplinkage void notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = read_cr2(); /* Get the faulting address */
	enum ctx_state prev_state;

	/*
	 * This function must be protected from kprobes, tagged notrace,
	 * and call read_cr2() before calling anything else, to avoid
	 * invoking any kind of tracing machinery before we've observed
	 * the CR2 value.
	 *
	 * exception_{enter,exit}() contain all sorts of tracepoints.
	 */
	prev_state = exception_enter();
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);

#ifdef CONFIG_TRACING
static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
			 unsigned long error_code)
{
	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}

dotraplinkage void notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	/*
	 * The exception_enter and tracepoint processing could
	 * trigger another page fault (user space callchain
	 * reading) and destroy the original cr2 value, so read
	 * the faulting address now.
	 */
	unsigned long address = read_cr2();
	enum ctx_state prev_state;

	prev_state = exception_enter();
	trace_page_fault_entries(address, regs, error_code);
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(trace_do_page_fault);
#endif /* CONFIG_TRACING */