fault.c

/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ... */
#include <linux/kdebug.h>		/* oops_begin/end, ... */
#include <linux/module.h>		/* search_exception_table */
#include <linux/bootmem.h>		/* max_low_pfn */
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ... */
#include <linux/mmiotrace.h>		/* kmmio_handler, ... */
#include <linux/perf_event.h>		/* perf_sw_event */
#include <linux/hugetlb.h>		/* hstate_index_to_shift */
#include <linux/prefetch.h>		/* prefetchw */
#include <linux/context_tracking.h>	/* exception_enter(), ... */
#include <linux/uaccess.h>		/* faulthandler_disabled() */

#include <asm/cpufeature.h>		/* boot_cpu_has, ... */
#include <asm/traps.h>			/* dotraplinkage, ... */
#include <asm/pgalloc.h>		/* pgd_*(), ... */
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ... */
#include <asm/fixmap.h>			/* VSYSCALL_ADDR */
#include <asm/vsyscall.h>		/* emulate_vsyscall */
#include <asm/vm86.h>			/* struct vm86 */
#include <asm/mmu_context.h>		/* vma_pkey() */

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 == 0: no page found		1: protection fault
 *   bit 1 == 0: read access		1: write access
 *   bit 2 == 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 *   bit 5 ==				1: protection keys block access
 */
enum x86_pf_error_code {

	PF_PROT		= 1 << 0,
	PF_WRITE	= 1 << 1,
	PF_USER		= 1 << 2,
	PF_RSVD		= 1 << 3,
	PF_INSTR	= 1 << 4,
	PF_PK		= 1 << 5,
};
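
/*
 * Illustrative decoding of the bits above (a sketch, not part of the
 * original file; pf_describe is a hypothetical helper). A user-space
 * write to a present page arrives as PF_PROT | PF_WRITE | PF_USER == 0x7:
 *
 *	static void pf_describe(unsigned long error_code)
 *	{
 *		pr_info("%s-mode %s, page %s\n",
 *			(error_code & PF_USER)  ? "user"    : "kernel",
 *			(error_code & PF_WRITE) ? "write"   : "read",
 *			(error_code & PF_PROT)  ? "present" : "not present");
 *	}
 */
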
/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26, 0x2E, 0x36 and 0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present, so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * We need to figure out under what instruction mode the
		 * instruction was issued. We could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well-known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 through 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}
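
/*
 * Example encodings (for illustration only, not from the original file):
 * "prefetchnta (%rax)" assembles to the bytes 0F 18 00. The scan above
 * reads 0x0F, lands in the 0x00 case of check_prefetch_opcode() with
 * instr_lo == 0xF, peeks at the next byte (0x18) and flags the faulting
 * access as a harmless prefetch:
 *
 *	0F 18 00	prefetchnta (%rax)	-> prefetch = 1
 *	0F 0D 08	prefetchw   (%rax)	-> prefetch = 1 (3DNow! hint)
 */
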
/*
 * A protection key fault means that the PKRU value did not allow
 * access to some PTE. Userspace can figure out what PKRU was
 * from the XSAVE state, and this function fills out a field in
 * siginfo so userspace can discover which protection key was set
 * on the PTE.
 *
 * If we get here, we know that the hardware signaled a PF_PK
 * fault and that there was a VMA once we got in the fault
 * handler. It does *not* guarantee that the VMA we find here
 * was the one that we faulted on.
 *
 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 * 3. T1   : faults...
 * 4. T2   : mprotect_key(foo, PAGE_SIZE, pkey=5);
 * 5. T1   : enters fault handler, takes mmap_sem, etc...
 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 *	     faulted on a pte with its pkey=4.
 */
static void fill_sig_info_pkey(int si_code, siginfo_t *info,
		struct vm_area_struct *vma)
{
	/* This is effectively an #ifdef */
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return;

	/* Fault not from Protection Keys: nothing to do */
	if (si_code != SEGV_PKUERR)
		return;

	/*
	 * force_sig_info_fault() is called from a number of
	 * contexts, some of which have a VMA and some of which
	 * do not. The PF_PK handling happens after we have a
	 * valid VMA, so we should never reach this without a
	 * valid VMA.
	 */
	if (!vma) {
		WARN_ONCE(1, "PKU fault with no VMA passed in");
		info->si_pkey = 0;
		return;
	}

	/*
	 * si_pkey should be thought of as a strong hint, but not
	 * absolutely guaranteed to be 100% accurate because of
	 * the race explained above.
	 */
	info->si_pkey = vma_pkey(vma);
}
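
/*
 * Userspace-side sketch (an assumption, not part of the original file:
 * it presumes a libc whose siginfo_t exposes si_pkey). A SIGSEGV handler
 * can read the hinted key delivered by the code above:
 *
 *	static void segv_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == SEGV_PKUERR)
 *			fprintf(stderr, "pkey %d blocked access at %p\n",
 *				(int)si->si_pkey, si->si_addr);
 *	}
 *
 * Per the race documented above, treat the key as a strong hint only.
 */
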
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, struct vm_area_struct *vma,
		     int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	fill_sig_info_pkey(si_code, &info, vma);

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
#ifdef CONFIG_VM86
	unsigned long bit;

	if (!v8086_mode(regs) || !tsk->thread.vm86)
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.vm86->screen_bitmap |= 1 << bit;
#endif
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd)) {
		set_pgd(pgd, *pgd_ref);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
	}

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);
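
/*
 * Illustrative scenario (a sketch, not part of the original file): task A
 * vmalloc()s a new area, which may install a top-level entry only in the
 * init_mm 'reference' page tables. When another task later touches that
 * area, perhaps from interrupt context, it faults here and vmalloc_fault()
 * copies the missing entry into its own page tables, lock-free:
 *
 *	void *p = vmalloc(PAGE_SIZE);	// may populate a new init_mm entry
 *	...
 *	memset(p, 0, PAGE_SIZE);	// other tasks fault the entry in lazily
 */
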
#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR 
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64-bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that weren't tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: the K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_has_bug(X86_BUG_F00F)) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
static const char smep_warning[] = KERN_CRIT
"unable to execute userspace code (SMEP?) (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
		if (pte && pte_present(*pte) && pte_exec(*pte) &&
				(pgd_flags(*pgd) & _PAGE_USER) &&
				(__read_cr4() & X86_CR4_SMEP))
			printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long flags;
	int sig;
	/* No context means no VMA to pass down */
	struct vm_area_struct *vma = NULL;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the recursive fault logic below apply only to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current_thread_info()->sig_on_uaccess_error && signal) {
			tsk->thread.trap_nr = X86_TRAP_PF;
			tsk->thread.error_code = error_code | PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address,
					     tsk, vma, 0);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch, fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	if (task_stack_end_corrupted(tsk))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, struct vm_area_struct *vma,
		       int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif

		/* Kernel addresses are always protection faults: */
		if (address >= TASK_SIZE)
			error_code |= PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code;
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address, struct vm_area_struct *vma)
{
	__bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, struct vm_area_struct *vma, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, vma, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
}

static inline bool bad_area_access_from_pkeys(unsigned long error_code,
		struct vm_area_struct *vma)
{
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return false;
	if (error_code & PF_PK)
		return true;
	return false;
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
	/*
	 * This OSPKE check is not strictly necessary at runtime.
	 * But, doing it this way allows compiler optimizations
	 * if pkeys are compiled out.
	 */
	if (bad_area_access_from_pkeys(error_code, vma))
		__bad_area(regs, error_code, address, vma, SEGV_PKUERR);
	else
		__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  struct vm_area_struct *vma, unsigned int fault)
{
	struct task_struct *tsk = current;
	int code = BUS_ADRERR;

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_nr	= X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, struct vm_area_struct *vma,
	       unsigned int fault)
{
	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, vma, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area_nosemaphore(regs, error_code, address, vma);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;
	/*
	 * Note: We do not do lazy flushing on protection key
	 * changes, so no spurious fault will ever set PF_PK.
	 */
	if ((error_code & PF_PK))
		return 1;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permissions than the page table entry. Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/*
	 * Only writes to RO or instruction fetches from NX may cause
	 * spurious faults.
	 *
	 * These could be from user or supervisor accesses but the TLB
	 * is only lazily flushed after a kernel mapping protection
	 * change, so user accesses are not expected to cause spurious
	 * faults.
	 */
	if (error_code != (PF_WRITE | PF_PROT)
	    && error_code != (PF_INSTR | PF_PROT))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
NOKPROBE_SYMBOL(spurious_fault);
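
/*
 * Worked example (a sketch under stated assumptions, not from the original
 * file): suppose the kernel upgrades a read-only mapping with something
 * like set_memory_rw() and relies on the lazy flush described above. A CPU
 * that still caches the old RO translation then takes a write fault with
 * error_code == (PF_WRITE | PF_PROT); the page tables already permit the
 * write, so spurious_fault() succeeds and the stale entry is refetched
 * without a cross-processor flush:
 *
 *	set_memory_rw(addr, 1);		// RO -> RW, no global TLB flush
 *	// A CPU with the stale RO entry writes to addr, faults
 *	// spuriously, and resumes once the TLB entry is refreshed.
 */
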
int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	/*
	 * Access or read was blocked by protection keys. We do
	 * this check before any others because we do not want
	 * to, for instance, confuse a protection-key-denied
	 * write with one for which we should do a COW.
	 */
	if (error_code & PF_PK)
		return 1;

	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}
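
/*
 * For illustration (not part of the original file): a write to a
 * PROT_READ-only user mapping fails the VM_WRITE check above and is
 * reported as SEGV_ACCERR rather than being treated as a COW candidate:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	*p = 1;		// PF_WRITE on a !VM_WRITE vma -> access_error() != 0
 */
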
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_X86_SMAP))
		return false;

	if (!static_cpu_has(X86_FEATURE_SMAP))
		return false;

	if (error_code & PF_USER)
		return false;

	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
		return false;

	return true;
}
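
/*
 * Background sketch (for illustration, not from the original file): with
 * SMAP enabled, kernel code may touch user memory only inside a
 * stac()/clac() window, which sets and clears EFLAGS.AC. The AC test
 * above forgives kernel-mode faults taken inside such a window:
 *
 *	stac();		// open the user-access window (sets EFLAGS.AC)
 *	// ... kernel loads/stores through a user pointer here ...
 *	clac();		// close it again (clears EFLAGS.AC)
 */
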
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * This function must have noinline because both callers
 * {,trace_}do_page_fault() have notrace on. Making this an actual function
 * guarantees there's a function trace entry.
 */
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, major = 0;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (kprobes_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address, NULL);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(kprobes_fault(regs)))
		return;

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	if (unlikely(smap_violation(error_code, regs))) {
		bad_area_nosemaphore(regs, error_code, address, NULL);
		return;
	}

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in a region with pagefaults disabled, then we must not take
	 * the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address, NULL);
		return;
	}

	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & PF_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in
	 * the kernel and should generate an OOPS. Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space. Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exception tables.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, and if we cannot,
	 * then validate the source. If this is invalid we can skip the
	 * address space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address, NULL);
			return;
		}
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address, vma);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	major |= fault & VM_FAULT_MAJOR;

	/*
	 * If we need to retry the mmap_sem has already been released,
	 * and if there is a fatal signal pending there is no guarantee
	 * that we made any progress. Handle this case first.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* Retry at most once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(tsk))
				goto retry;
		}

		/* User mode? Just return to handle the fatal exception */
		if (flags & FAULT_FLAG_USER)
			return;

		/* Not returning to user mode? Handle exceptions or die: */
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	up_read(&mm->mmap_sem);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, vma, fault);
		return;
	}

	/*
	 * Major/minor page fault accounting. If any of the events
	 * returned VM_FAULT_MAJOR, we account it as a major fault.
	 */
	if (major) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}

	check_v8086_mode(regs, address, tsk);
}
NOKPROBE_SYMBOL(__do_page_fault);
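
/*
 * Illustrative sketch of the trylock dance above (kernel_fault and oops()
 * are hypothetical stand-ins, not real helpers in this file): try the lock
 * first, and only sleep on it when the faulting instruction is a known
 * user-access site listed in the exception tables:
 *
 *	if (!down_read_trylock(&mm->mmap_sem)) {
 *		if (kernel_fault && !search_exception_tables(regs->ip))
 *			oops();			// likely held mmap_sem: bogus access
 *		down_read(&mm->mmap_sem);	// otherwise it is safe to sleep
 *	}
 */
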
dotraplinkage void notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = read_cr2(); /* Get the faulting address */
	enum ctx_state prev_state;

	/*
	 * We must have this function tagged with __kprobes, notrace and call
	 * read_cr2() before calling anything else, to avoid calling any kind
	 * of tracing machinery before we've observed the CR2 value.
	 *
	 * exception_{enter,exit}() contain all sorts of tracepoints.
	 */

	prev_state = exception_enter();
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);

#ifdef CONFIG_TRACING
static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
			 unsigned long error_code)
{
	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}

dotraplinkage void notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	/*
	 * The exception_enter and tracepoint processing could
	 * trigger another page fault (user space callchain
	 * reading) and destroy the original cr2 value, so read
	 * the faulting address now.
	 */
	unsigned long address = read_cr2();
	enum ctx_state prev_state;

	prev_state = exception_enter();
	trace_page_fault_entries(address, regs, error_code);
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(trace_do_page_fault);
#endif /* CONFIG_TRACING */