// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/extable.h>		/* search_exception_tables	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/

#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
#include <asm/mmu_context.h>		/* vma_pkey()			*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>
/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & X86_PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}
/*
 * A protection key fault means that the PKRU value did not allow
 * access to some PTE.  Userspace can figure out what PKRU was
 * from the XSAVE state, and this function fills out a field in
 * siginfo so userspace can discover which protection key was set
 * on the PTE.
 *
 * If we get here, we know that the hardware signaled an X86_PF_PK
 * fault and that there was a VMA once we got in the fault
 * handler.  It does *not* guarantee that the VMA we find here
 * was the one that we faulted on.
 *
 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 * 3. T1   : faults...
 * 4. T2   : mprotect_key(foo, PAGE_SIZE, pkey=5);
 * 5. T1   : enters fault handler, takes mmap_sem, etc...
 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 *	     faulted on a pte with its pkey=4.
 */
static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
		u32 *pkey)
{
	/* This is effectively an #ifdef */
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return;

	/* Fault not from Protection Keys: nothing to do */
	if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
		return;
	/*
	 * force_sig_info_fault() is called from a number of
	 * contexts, some of which have a VMA and some of which
	 * do not.  The X86_PF_PK handling happens after we have a
	 * valid VMA, so we should never reach this without a
	 * valid VMA.
	 */
	if (!pkey) {
		WARN_ONCE(1, "PKU fault with no VMA passed in");
		info->si_pkey = 0;
		return;
	}
	/*
	 * si_pkey should be thought of as a strong hint, but not
	 * absolutely guaranteed to be 100% accurate because of
	 * the race explained above.
	 */
	info->si_pkey = *pkey;
}
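
/*
 * Fill in a siginfo for a memory-access fault -- including the
 * poisoned-page granularity in si_addr_lsb and, when applicable, the
 * protection key -- and deliver the signal to @tsk.
 */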
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, u32 *pkey, int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	fill_sig_info_pkey(si_signo, si_code, &info, pkey);

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
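/*
 * Copy the kernel PMD entry covering @address from the reference page
 * tables (init_mm) into the page tables rooted at @pgd.  Returns the
 * kernel PMD on success, or NULL if the reference entry is not present.
 */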
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_p4d/set_pud.
	 */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}
/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3_pa();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	if (pmd_large(*pmd_k))
		return 0;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);
/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
#ifdef CONFIG_VM86
	unsigned long bit;

	if (!v8086_mode(regs) || !tsk->thread.vm86)
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.vm86->screen_bitmap |= 1 << bit;
#endif
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}
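
/*
 * Print the page-table walk for @address to the kernel log.  Levels
 * whose backing pages sit above max_low_pfn (i.e. in highmem) are not
 * dereferenced.
 */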
static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(address)];
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#define pr_pde pr_cont
#else
#define pr_pde pr_info
#endif
	p4d = p4d_offset(pgd, address);
	pud = pud_offset(p4d, address);
	pmd = pmd_offset(pud, address);
	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
#undef pr_pde

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	pr_cont("\n");
}
#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
	pgd_k = pgd_offset_k(address);
	if (pgd_none(*pgd_k))
		return -1;

	if (pgtable_l5_enabled()) {
		if (pgd_none(*pgd)) {
			set_pgd(pgd, *pgd_k);
			arch_flush_lazy_mmu_mode();
		} else {
			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
		}
	}

	/* With 4-level paging, copying happens on the p4d level. */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		return -1;

	if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
		set_p4d(p4d, *p4d_k);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
	}

	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -1;

	if (pud_large(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -1;

	if (pmd_large(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);
#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}
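
/*
 * Probe whether the page-table entry at @p can be read safely.
 * Returns non-zero if reading the address itself would fault.
 */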
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = base + pgd_index(address);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	pr_info("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (bad_address(p4d))
		goto bad;

	pr_cont("P4D %lx ", p4d_val(*p4d));
	if (!p4d_present(*p4d) || p4d_large(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (bad_address(pud))
		goto bad;

	pr_cont("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	pr_cont("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	pr_cont("PTE %lx", pte_val(*pte));
out:
	pr_cont("\n");
	return;
bad:
	pr_info("BAD\n");
}

#endif /* CONFIG_X86_64 */
/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note that we only handle kernel faults here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}
/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}
static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_has_bug(X86_BUG_F00F)) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & X86_PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3_pa());
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
		if (pte && pte_present(*pte) && pte_exec(*pte) &&
				(pgd_flags(*pgd) & _PAGE_USER) &&
				(__read_cr4() & X86_CR4_SMEP))
			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
	}

	pr_alert("BUG: unable to handle kernel %s at %px\n",
		 address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
		 (void *)address);

	dump_pagetable(address);
}
static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs, X86_TRAP_PF)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current->thread.sig_on_uaccess_err && signal) {
			tsk->thread.trap_nr = X86_TRAP_PF;
			tsk->thread.error_code = error_code | X86_PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address,
					     tsk, NULL, 0);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

#ifdef CONFIG_VMAP_STACK
	/*
	 * Stack overflow?  During boot, we can fault near the initial
	 * stack in the direct map, but that's not an overflow -- check
	 * that we're in vmalloc space to avoid this.
	 */
	if (is_vmalloc_addr((void *)address) &&
	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
		unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
		/*
		 * We're likely to be running with very little stack space
		 * left.  It's plausible that we'd hit this condition but
		 * double-fault even before we get this far, in which case
		 * we're fine: the double-fault handler will deal with it.
		 *
		 * We don't want to make it all the way into the oops code
		 * and then double-fault, though, because we're likely to
		 * break the console driver and lose most of the stack dump.
		 */
		asm volatile ("movq %[stack], %%rsp\n\t"
			      "call handle_stack_overflow\n\t"
			      "1: jmp 1b"
			      : ASM_CALL_CONSTRAINT
			      : "D" ("kernel stack overflow (page fault)"),
				"S" (regs), "d" (address),
				[stack] "rm" (stack));
		unreachable();
	}
#endif

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	if (task_stack_end_corrupted(tsk))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}
/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;

	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
		loglvl, tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");

	show_opcodes((u8 *)regs->ip, loglvl);
}
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, u32 *pkey, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & X86_PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & X86_PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif

		/*
		 * To avoid leaking information about the kernel page table
		 * layout, pretend that user-mode accesses to kernel addresses
		 * are always protection faults.
		 */
		if (address >= TASK_SIZE_MAX)
			error_code |= X86_PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code;
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address, u32 *pkey)
{
	__bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, struct vm_area_struct *vma, int si_code)
{
	struct mm_struct *mm = current->mm;
	u32 pkey;

	if (vma)
		pkey = vma_pkey(vma);

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address,
			       (vma) ? &pkey : NULL, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
}
static inline bool bad_area_access_from_pkeys(unsigned long error_code,
		struct vm_area_struct *vma)
{
	/* This code is always called on the current mm */
	bool foreign = false;

	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return false;
	if (error_code & X86_PF_PK)
		return true;
	/* this checks permission keys on the VMA: */
	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
				       (error_code & X86_PF_INSTR), foreign))
		return true;
	return false;
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
	/*
	 * This OSPKE check is not strictly necessary at runtime.
	 * But, doing it this way allows compiler optimizations
	 * if pkeys are compiled out.
	 */
	if (bad_area_access_from_pkeys(error_code, vma))
		__bad_area(regs, error_code, address, vma, SEGV_PKUERR);
	else
		__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
}
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  u32 *pkey, unsigned int fault)
{
	struct task_struct *tsk = current;
	int code = BUS_ADRERR;

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_nr	= X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
}
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, u32 *pkey, unsigned int fault)
{
	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & X86_PF_USER)) {
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, pkey, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area_nosemaphore(regs, error_code, address, pkey);
		else
			BUG();
	}
}
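
/*
 * Check whether a present page-table entry already grants the access
 * described by @error_code.  Returns 1 when it does, i.e. the fault
 * must have come from a stale TLB entry and can be treated as spurious.
 */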
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
		return 0;
	/*
	 * Note: We do not do lazy flushing on protection key
	 * changes, so no spurious fault will ever set X86_PF_PK.
	 */
	if ((error_code & X86_PF_PK))
		return 1;

	return 1;
}
/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permissions than the page table entry.  Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/*
	 * Only writes to RO or instruction fetches from NX may cause
	 * spurious faults.
	 *
	 * These could be from user or supervisor accesses but the TLB
	 * is only lazily flushed after a kernel mapping protection
	 * change, so user accesses are not expected to cause spurious
	 * faults.
	 */
	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
	    error_code != (X86_PF_INSTR | X86_PF_PROT))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;

	if (p4d_large(*p4d))
		return spurious_fault_check(error_code, (pte_t *) p4d);

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
NOKPROBE_SYMBOL(spurious_fault);
int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	/* This is only called for the current mm, so: */
	bool foreign = false;

	/*
	 * Read or write was blocked by protection keys.  This is
	 * always an unconditional error and can never result in
	 * a follow-up action to resolve the fault, like a COW.
	 */
	if (error_code & X86_PF_PK)
		return 1;

	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a X86_PF_PK as soon as we fill in a
	 * page.
	 */
	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
				       (error_code & X86_PF_INSTR), foreign))
		return 1;

	if (error_code & X86_PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & X86_PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}
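
/*
 * With SMAP enabled, a supervisor-mode access to a user address is only
 * legitimate while EFLAGS.AC is set (i.e. between STAC and CLAC in a
 * uaccess region); anything else is a SMAP violation.
 */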
static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_X86_SMAP))
		return false;

	if (!static_cpu_has(X86_FEATURE_SMAP))
		return false;

	if (error_code & X86_PF_USER)
		return false;

	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
		return false;

	return true;
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, major = 0;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	u32 pkey;

	tsk = current;
	mm = tsk->mm;

	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (kprobes_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address, NULL);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(kprobes_fault(regs)))
		return;

	if (unlikely(error_code & X86_PF_RSVD))
		pgtable_bad(regs, error_code, address);

	if (unlikely(smap_violation(error_code, regs))) {
		bad_area_nosemaphore(regs, error_code, address, NULL);
		return;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address, NULL);
		return;
	}

	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode(regs)) {
		local_irq_enable();
		error_code |= X86_PF_USER;
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & X86_PF_WRITE)
		flags |= FAULT_FLAG_WRITE;
	if (error_code & X86_PF_INSTR)
		flags |= FAULT_FLAG_INSTRUCTION;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!(error_code & X86_PF_USER) &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address, NULL);
			return;
		}
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & X86_PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address, vma);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
	 *
	 * Note that handle_userfault() may also release and reacquire mmap_sem
	 * (and not return with VM_FAULT_RETRY), when returning to userland to
	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
	 * (potentially after handling any pending signal during the return to
	 * userland). The return to userland is identified whenever
	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
	 * Thus we have to be careful about not touching vma after handling the
	 * fault, so we read the pkey beforehand.
	 */
	pkey = vma_pkey(vma);
	fault = handle_mm_fault(vma, address, flags);
	major |= fault & VM_FAULT_MAJOR;

	/*
	 * If we need to retry the mmap_sem has already been released,
	 * and if there is a fatal signal pending there is no guarantee
	 * that we made any progress. Handle this case first.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* Retry at most once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(tsk))
				goto retry;
		}

		/* User mode? Just return to handle the fatal exception */
		if (flags & FAULT_FLAG_USER)
			return;

		/* Not returning to user mode? Handle exceptions or die: */
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	up_read(&mm->mmap_sem);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, &pkey, fault);
		return;
	}

	/*
	 * Major/minor page fault accounting. If any of the events
	 * returned VM_FAULT_MAJOR, we account it as a major fault.
	 */
	if (major) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}

	check_v8086_mode(regs, address, tsk);
}
NOKPROBE_SYMBOL(__do_page_fault);

static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
			 unsigned long error_code)
{
	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}
/*
 * We must have this function blacklisted from kprobes, tagged with notrace
 * and call read_cr2() before calling anything else, to avoid calling any
 * kind of tracing machinery before we've observed the CR2 value.
 *
 * exception_{enter,exit}() contains all sorts of tracepoints.
 */
dotraplinkage void notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = read_cr2(); /* Get the faulting address */
	enum ctx_state prev_state;

	prev_state = exception_enter();
	if (trace_pagefault_enabled())
		trace_page_fault_entries(address, regs, error_code);

	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);