fault.c

/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>

#include "icswx.h"

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 11))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
        return 0;
}
#endif

/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
 */
static int store_updates_sp(struct pt_regs *regs)
{
        unsigned int inst;

        if (get_user(inst, (unsigned int __user *)regs->nip))
                return 0;
        /* check for 1 in the rA field */
        if (((inst >> 16) & 0x1f) != 1)
                return 0;
        /* check major opcode */
        switch (inst >> 26) {
        case 37:        /* stwu */
        case 39:        /* stbu */
        case 45:        /* sthu */
        case 53:        /* stfsu */
        case 55:        /* stfdu */
                return 1;
        case 62:        /* std or stdu */
                return (inst & 3) == 1;
        case 31:
                /* check minor opcode */
                switch ((inst >> 1) & 0x3ff) {
                case 181:       /* stdux */
                case 183:       /* stwux */
                case 247:       /* stbux */
                case 439:       /* sthux */
                case 695:       /* stfsux */
                case 759:       /* stfdux */
                        return 1;
                }
        }
        return 0;
}
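
/*
 * Worked example (illustrative, not part of the original source): the
 * common prologue store "stwu r1,-64(r1)" assembles to 0x9421ffc0, so
 * (inst >> 26) == 37 and ((inst >> 16) & 0x1f) == 1, and
 * store_updates_sp() returns 1 for it.
 */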

/*
 * do_page_fault error handling helpers
 */
#define MM_FAULT_RETURN         0
#define MM_FAULT_CONTINUE       -1
#define MM_FAULT_ERR(sig)       (sig)
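
/*
 * Descriptive note on the convention, as used by the code below:
 * MM_FAULT_CONTINUE tells do_page_fault() to keep processing the fault
 * in line, MM_FAULT_RETURN means the fault has been fully dealt with
 * (e.g. a signal was queued), and a positive value is a signal number
 * for an unrecoverable kernel-mode fault.
 */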

static int do_sigbus(struct pt_regs *regs, unsigned long address,
                     unsigned int fault)
{
        siginfo_t info;
        unsigned int lsb = 0;

        up_read(&current->mm->mmap_sem);

        if (!user_mode(regs))
                return MM_FAULT_ERR(SIGBUS);

        current->thread.trap_nr = BUS_ADRERR;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void __user *)address;
#ifdef CONFIG_MEMORY_FAILURE
        if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
                pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
                        current->comm, current->pid, address);
                info.si_code = BUS_MCEERR_AR;
        }

        if (fault & VM_FAULT_HWPOISON_LARGE)
                lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
        if (fault & VM_FAULT_HWPOISON)
                lsb = PAGE_SHIFT;
#endif
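        /*
         * si_addr_lsb tells userspace how much of si_addr is significant:
         * PAGE_SHIFT for a poisoned base page, or the huge page shift for
         * a poisoned huge page (set above under CONFIG_MEMORY_FAILURE).
         */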
        info.si_addr_lsb = lsb;
        force_sig_info(SIGBUS, &info, current);
        return MM_FAULT_RETURN;
}

static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
{
        /*
         * Pagefault was interrupted by SIGKILL. We have no reason to
         * continue the pagefault.
         */
        if (fatal_signal_pending(current)) {
                /*
                 * If we have retry set, the mmap semaphore will have
                 * already been released in __lock_page_or_retry(). Else
                 * we release it now.
                 */
                if (!(fault & VM_FAULT_RETRY))
                        up_read(&current->mm->mmap_sem);
                /* Coming from kernel, we need to deal with uaccess fixups */
                if (user_mode(regs))
                        return MM_FAULT_RETURN;
                return MM_FAULT_ERR(SIGKILL);
        }

        /* No fault: be happy */
        if (!(fault & VM_FAULT_ERROR))
                return MM_FAULT_CONTINUE;

        /* Out of memory */
        if (fault & VM_FAULT_OOM) {
                up_read(&current->mm->mmap_sem);

                /*
                 * We ran out of memory, or some other thing happened to us
                 * that made us unable to handle the page fault gracefully.
                 */
                if (!user_mode(regs))
                        return MM_FAULT_ERR(SIGKILL);
                pagefault_out_of_memory();
                return MM_FAULT_RETURN;
        }

        if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE))
                return do_sigbus(regs, addr, fault);

        /* We don't understand the fault code, this is fatal */
        BUG();
        return MM_FAULT_CONTINUE;
}
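
/*
 * Usage note (descriptive, based on the caller below): do_page_fault()
 * tests the return value with "rc >= MM_FAULT_RETURN", so
 * MM_FAULT_CONTINUE (-1) resumes normal fault processing, while 0 or a
 * signal number makes it bail out immediately with that value.
 */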

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code)
{
        enum ctx_state prev_state = exception_enter();
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        int code = SEGV_MAPERR;
        int is_write = 0;
        int trap = TRAP(regs);
        int is_exec = trap == 0x400;
        int fault;
        int rc = 0, store_update_sp = 0;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        /*
         * Fortunately the bit assignments in SRR1 for an instruction
         * fault and DSISR for a data fault are mostly the same for the
         * bits we are interested in. But there are some bits which
         * indicate errors in DSISR but can validly be set in SRR1.
         */
        if (trap == 0x400)
                error_code &= 0x48200000;
        else
                is_write = error_code & DSISR_ISSTORE;
#else
        is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */

#ifdef CONFIG_PPC_ICSWX
        /*
         * We need to do this early because this "data storage
         * interrupt" does not update the DAR/DEAR, so we don't want to
         * look at it.
         */
        if (error_code & ICSWX_DSI_UCT) {
                rc = acop_handle_fault(regs, address, error_code);
                if (rc)
                        goto bail;
        }
#endif /* CONFIG_PPC_ICSWX */

        if (notify_page_fault(regs))
                goto bail;

        if (unlikely(debugger_fault_handler(regs)))
                goto bail;

        /*
         * The kernel should never take an execute fault nor should it
         * take a page fault to a kernel address.
         */
        if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
                rc = SIGSEGV;
                goto bail;
        }

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
                             defined(CONFIG_PPC_BOOK3S_64))
        if (error_code & DSISR_DABRMATCH) {
                /* breakpoint match */
                do_break(regs, address, error_code);
                goto bail;
        }
#endif

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        if (faulthandler_disabled() || mm == NULL) {
                if (!user_mode(regs)) {
                        rc = SIGSEGV;
                        goto bail;
                }
                /* faulthandler_disabled() in user mode is really bad,
                   as is current->mm == NULL. */
                printk(KERN_EMERG "Page fault in user mode with "
                       "faulthandler_disabled() = %d mm = %p\n",
                       faulthandler_disabled(), mm);
                printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
                       regs->nip, regs->msr);
                die("Weird page fault", regs, SIGSEGV);
        }

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /*
         * We want to do this outside mmap_sem, because reading code around nip
         * can result in a fault, which will cause a deadlock when called with
         * mmap_sem held.
         */
        if (user_mode(regs))
                store_update_sp = store_updates_sp(regs);

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space; if we cannot, we then validate the
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->nip))
                        goto bad_area_nosemaphore;

retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                 * which case we'll have missed the might_sleep() from
                 * down_read():
                 */
                might_sleep();
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        /*
         * N.B. The POWER/Open ABI allows programs to access up to
         * 288 bytes below the stack pointer.
         * The kernel signal delivery code writes up to about 1.5kB
         * below the stack pointer (r1) before decrementing it.
         * The exec code can write slightly over 640kB to the stack
         * before setting the user r1.  Thus we allow the stack to
         * expand to 1MB without further checks.
         */
        if (address + 0x100000 < vma->vm_end) {
                /* get user regs even if this fault is in kernel mode */
                struct pt_regs *uregs = current->thread.regs;
                if (uregs == NULL)
                        goto bad_area;

                /*
                 * A user-mode access to an address a long way below
                 * the stack pointer is only valid if the instruction
                 * is one which would update the stack pointer to the
                 * address accessed if the instruction completed,
                 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
                 * (or the byte, halfword, float or double forms).
                 *
                 * If we don't check this then any write to the area
                 * between the last mapped region and the stack will
                 * expand the stack rather than segfaulting.
                 */
                if (address + 2048 < uregs->gpr[1] && !store_update_sp)
                        goto bad_area;
        }
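
        /*
         * Worked example (illustrative): a red-zone access at r1 - 288,
         * which the POWER/Open ABI permits, satisfies
         * address + 2048 >= uregs->gpr[1], so it passes the check above
         * without requiring an updating store at regs->nip.
         */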
        if (expand_stack(vma, address))
                goto bad_area;

good_area:
        code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
        if (error_code & 0x95700000)
                /* an error such as lwarx to I/O controller space,
                   address matching DABR, eciwx, etc. */
                goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
        /* The MPC8xx seems to always set 0x80000000, which is
         * "undefined".  Of those that can be set, this is the only
         * one which seems bad.
         */
        if (error_code & 0x10000000)
                /* Guarded storage error. */
                goto bad_area;
#endif /* CONFIG_8xx */

        if (is_exec) {
                /*
                 * Allow execution from readable areas if the MMU does not
                 * provide separate controls over reading and executing.
                 *
                 * Note: That code used to not be enabled for 4xx/BookE.
                 * It is now as I/D cache coherency for these is done at
                 * set_pte_at() time and I see no reason why the test
                 * below wouldn't be valid on those processors. This -may-
                 * break programs compiled with a really old ABI though.
                 */
                if (!(vma->vm_flags & VM_EXEC) &&
                    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
                     !(vma->vm_flags & (VM_READ | VM_WRITE))))
                        goto bad_area;
        /* a write */
        } else if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        /* a read */
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }
#ifdef CONFIG_PPC_STD_MMU
        /*
         * For hash translation mode, we should never get a
         * PROTFAULT. Any update to pte to reduce access will result in us
         * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
         * fault instead of DSISR_PROTFAULT.
         *
         * A pte update to relax the access will not result in a hash page table
         * entry invalidate and hence can result in DSISR_PROTFAULT.
         * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
         * the special !is_write in the below conditional.
         *
         * For platforms that don't support a coherent icache but do support
         * a per-page noexec bit, we set things up so that the D/I cache
         * sync happens via a fault. But that is handled by the low level
         * hash fault code (hash_page_do_lazy_icache()) and we should not
         * reach here in that case.
         *
         * For a wrong access that can result in a PROTFAULT, the above
         * vma->vm_flags check should handle it, so we should fall through
         * to the bad_area handling correctly.
         *
         * For embedded platforms with per-page exec support that don't have
         * a coherent icache, we do get a PROTFAULT and we handle that D/I
         * cache sync in set_pte_at while taking the noexec/prot fault. Hence
         * this WARN_ON is conditional on the server MMU.
         *
         * For radix, we can get a prot fault for the autonuma case, because
         * the radix page table will have pages marked no-access for user.
         */
        if (!radix_enabled() && !is_write)
                WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
#endif /* CONFIG_PPC_STD_MMU */

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags);
        if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
                if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                rc = mm_fault_error(regs, address, fault);
                if (rc >= MM_FAULT_RETURN)
                        goto bail;
                else
                        rc = 0;
        }

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        current->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
#ifdef CONFIG_PPC_SMLPAR
                        if (firmware_has_feature(FW_FEATURE_CMO)) {
                                u32 page_ins;

                                preempt_disable();
                                page_ins = be32_to_cpu(get_lppaca()->page_ins);
                                page_ins += 1 << PAGE_FACTOR;
                                get_lppaca()->page_ins = cpu_to_be32(page_ins);
                                preempt_enable();
                        }
#endif /* CONFIG_PPC_SMLPAR */
                } else {
                        current->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation. */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        goto bail;

bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
                _exception(SIGSEGV, regs, code, address);
                goto bail;
        }

        if (is_exec && (error_code & DSISR_PROTFAULT))
                printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
                                   " page (%lx) - exploit attempt? (uid: %d)\n",
                                   address, from_kuid(&init_user_ns, current_uid()));

        rc = SIGSEGV;

bail:
        exception_exit(prev_state);
        return rc;
}
NOKPROBE_SYMBOL(do_page_fault);
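
/*
 * Note (descriptive, not from the original source): a non-zero return
 * from do_page_fault() is a signal number for an unhandled kernel-mode
 * fault; the low-level exception entry code is expected to pass it on
 * to bad_page_fault() below.
 */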

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
        const struct exception_table_entry *entry;

        /* Are we prepared to handle this fault?  */
        if ((entry = search_exception_tables(regs->nip)) != NULL) {
                regs->nip = extable_fixup(entry);
                return;
        }

        /* kernel has accessed a bad area */

        switch (regs->trap) {
        case 0x300:
        case 0x380:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "data at address 0x%08lx\n", regs->dar);
                break;
        case 0x400:
        case 0x480:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "instruction fetch\n");
                break;
        case 0x600:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "unaligned access at address 0x%08lx\n", regs->dar);
                break;
        default:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "unknown fault\n");
                break;
        }
        printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
                regs->nip);

        if (task_stack_end_corrupted(current))
                printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

        die("Kernel access of bad area", regs, sig);
}