/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <sysdev/fsl_pci.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...)	printk(KERN_INFO x)
#else
#define TM_DEBUG(x...)	do { } while (0)
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif
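
/*
 * Oops handling: die_lock serialises oops output across CPUs, and
 * oops_begin()/oops_end() bracket the reporting done by die() below.
 */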
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();
	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}

static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}

static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin(regs);

	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}

void user_single_step_siginfo(struct task_struct *tsk,
			      struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}
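
/*
 * Send a signal to the current task for an exception taken in user mode;
 * exceptions taken in kernel mode are fatal and are routed to die() instead.
 */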
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
		handled = cur_cpu_spec->machine_check_early(regs);
	return handled;
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_TM		0x200000
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
			recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:			/* for 601 */
	case 0x40000:
	case 0x140000:		/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int recover = 0;

	__get_cpu_var(irq_stat).mce_exceptions++;

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	goto bail;
#endif

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala. -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
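
/*
 * Raise SIGFPE for a floating-point exception taken in user mode, deriving
 * the si_code from the enabled exception bits in the saved FPSCR.
 */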
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support. Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault. Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits. In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
		case PPC_INST_LSWX:
		case PPC_INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case PPC_INST_LSWI:
		case PPC_INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
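
/*
 * popcntb emulation: the three masked add/shift steps below are the usual
 * SWAR reduction, leaving the population count of each byte of rS in the
 * corresponding byte of rA.
 */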
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra,rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
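
/*
 * isel rT,rA,rB,BC: rT = (CR bit BC set) ? rA : rB, with rA == r0 reading
 * as the value 0, which is what the emulation below implements.
 */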
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction. This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn. */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}
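
/*
 * emulate_math() returns 0 when the FP emulator handled the instruction
 * (raising any required signal itself), and -1 when the caller should fall
 * through to the normal illegal-instruction handling.
 */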
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void __kprobes program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist. We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0. In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a VSX instruction,
		   but this kernel doesn't support VSX. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
	};
	char *facility = "unknown";
	u64 value;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/* User is accessing the DSCR. Set the inherit bit and allow
		 * the user to set it directly in future by setting via the
		 * FSCR DSCR bit. We always leave HFSCR DSCR set.
		 */
  1176. current->thread.dscr_inherit = 1;
  1177. mtspr(SPRN_FSCR, value | FSCR_DSCR);
  1178. return;
  1179. }
  1180. if ((status < ARRAY_SIZE(facility_strings)) &&
  1181. facility_strings[status])
  1182. facility = facility_strings[status];
  1183. /* We restore the interrupt state now */
  1184. if (!arch_irq_disabled_regs(regs))
  1185. local_irq_enable();
  1186. pr_err_ratelimited(
  1187. "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
  1188. hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
  1189. if (user_mode(regs)) {
  1190. _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
  1191. return;
  1192. }
  1193. die("Unexpected facility unavailable exception", regs, SIGABRT);
  1194. }
  1195. #endif
  1196. #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  1197. void fp_unavailable_tm(struct pt_regs *regs)
  1198. {
  1199. /* Note: This does not handle any kind of FP laziness. */
  1200. TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
  1201. regs->nip, regs->msr);
  1202. /* We can only have got here if the task started using FP after
  1203. * beginning the transaction. So, the transactional regs are just a
  1204. * copy of the checkpointed ones. But, we still need to recheckpoint
  1205. * as we're enabling FP for the process; it will return, abort the
  1206. * transaction, and probably retry but now with FP enabled. So the
  1207. * checkpointed FP registers need to be loaded.
  1208. */
  1209. tm_reclaim_current(TM_CAUSE_FAC_UNAV);
  1210. /* Reclaim didn't save out any FPRs to transact_fprs. */
  1211. /* Enable FP for the task: */
  1212. regs->msr |= (MSR_FP | current->thread.fpexc_mode);
  1213. /* This loads and recheckpoints the FP registers from
  1214. * thread.fpr[]. They will remain in registers after the
  1215. * checkpoint so we don't need to reload them after.
  1216. * If VMX is in use, the VRs now hold checkpointed values,
  1217. * so we don't want to load the VRs from the thread_struct.
  1218. */
  1219. tm_recheckpoint(&current->thread, MSR_FP);
  1220. /* If VMX is in use, get the transactional values back */
  1221. if (regs->msr & MSR_VEC) {
  1222. do_load_up_transact_altivec(&current->thread);
  1223. /* At this point all the VSX state is loaded, so enable it */
  1224. regs->msr |= MSR_VSX;
  1225. }
  1226. }
  1227. void altivec_unavailable_tm(struct pt_regs *regs)
  1228. {
  1229. /* See the comments in fp_unavailable_tm(). This function operates
  1230. * the same way.
  1231. */
  1232. TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
  1233. "MSR=%lx\n",
  1234. regs->nip, regs->msr);
  1235. tm_reclaim_current(TM_CAUSE_FAC_UNAV);
  1236. regs->msr |= MSR_VEC;
  1237. tm_recheckpoint(&current->thread, MSR_VEC);
  1238. current->thread.used_vr = 1;
  1239. if (regs->msr & MSR_FP) {
  1240. do_load_up_transact_fpu(&current->thread);
  1241. regs->msr |= MSR_VSX;
  1242. }
  1243. }
  1244. void vsx_unavailable_tm(struct pt_regs *regs)
  1245. {
  1246. unsigned long orig_msr = regs->msr;
  1247. /* See the comments in fp_unavailable_tm(). This works similarly,
  1248. * though we're loading both FP and VEC registers in here.
  1249. *
  1250. * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
  1251. * regs. Either way, set MSR_VSX.
  1252. */
  1253. TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
  1254. "MSR=%lx\n",
  1255. regs->nip, regs->msr);
  1256. current->thread.used_vsr = 1;
  1257. /* If FP and VMX are already loaded, we have all the state we need */
  1258. if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
  1259. regs->msr |= MSR_VSX;
  1260. return;
  1261. }
  1262. /* This reclaims FP and/or VR regs if they're already enabled */
  1263. tm_reclaim_current(TM_CAUSE_FAC_UNAV);
  1264. regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
  1265. MSR_VSX;
  1266. /* This loads & recheckpoints FP and VRs; but we have
  1267. * to be sure not to overwrite previously-valid state.
  1268. */
  1269. tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
  1270. if (orig_msr & MSR_FP)
  1271. do_load_up_transact_fpu(&current->thread);
  1272. if (orig_msr & MSR_VEC)
  1273. do_load_up_transact_altivec(&current->thread);
  1274. }
  1275. #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
  1276. void performance_monitor_exception(struct pt_regs *regs)
  1277. {
  1278. __get_cpu_var(irq_stat).pmu_irqs++;
  1279. perf_irq(regs);
  1280. }
  1281. #ifdef CONFIG_8xx
  1282. void SoftwareEmulation(struct pt_regs *regs)
  1283. {
  1284. CHECK_FULL_REGS(regs);
  1285. if (!user_mode(regs)) {
  1286. debugger(regs);
  1287. die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
  1288. regs, SIGFPE);
  1289. }
  1290. if (!emulate_math(regs))
  1291. return;
  1292. _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
  1293. }
  1294. #endif /* CONFIG_8xx */
  1295. #ifdef CONFIG_PPC_ADV_DEBUG_REGS
  1296. static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
  1297. {
  1298. int changed = 0;
  1299. /*
  1300. * Determine the cause of the debug event, clear the
  1301. * event flags and send a trap to the handler. Torez
  1302. */
  1303. if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
  1304. dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
  1305. #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
  1306. current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
  1307. #endif
  1308. do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
  1309. 5);
  1310. changed |= 0x01;
  1311. } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
  1312. dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
  1313. do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
  1314. 6);
  1315. changed |= 0x01;
  1316. } else if (debug_status & DBSR_IAC1) {
  1317. current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
  1318. dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
  1319. do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
  1320. 1);
  1321. changed |= 0x01;
  1322. } else if (debug_status & DBSR_IAC2) {
  1323. current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
  1324. do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
  1325. 2);
  1326. changed |= 0x01;
  1327. } else if (debug_status & DBSR_IAC3) {
  1328. current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
  1329. dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
  1330. do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
  1331. 3);
  1332. changed |= 0x01;
  1333. } else if (debug_status & DBSR_IAC4) {
  1334. current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
  1335. do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
  1336. 4);
  1337. changed |= 0x01;
  1338. }
  1339. /*
  1340. * At the point this routine was called, the MSR(DE) was turned off.
  1341. * Check all other debug flags and see if that bit needs to be turned
  1342. * back on or not.
  1343. */
  1344. if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
  1345. current->thread.debug.dbcr1))
  1346. regs->msr |= MSR_DE;
  1347. else
  1348. /* Make sure the IDM flag is off */
  1349. current->thread.debug.dbcr0 &= ~DBCR0_IDM;
  1350. if (changed & 0x01)
  1351. mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
  1352. }
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
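
/*
 * The AltiVec assist interrupt is raised when the hardware wants software
 * help to finish a VMX operation (typically denormalized operands while
 * running in Java mode).  The handler below tries to emulate the
 * instruction; if it cannot recognise it, it sets the non-Java (NJ) bit
 * in the VSCR, which is what the 0x10000 written to vscr.u[3] is.
 */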
#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops; in the future we could try to do
	 * something smarter.
	 */
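	/*
	 * ESR_DLK/ESR_ILK indicate a data- or instruction-cache locking
	 * attempt (e.g. dcbtls/icbtls on e500 cores) that the core did not
	 * allow from user mode; the instruction names are given only as
	 * examples of the affected class.
	 */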
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	} else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	} else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) {
		code = FPE_FLTDIV;
	} else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	} else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) &&
		   (fpexc_mode & PR_FP_EXC_RES)) {
		code = FPE_FLTRES;
	}
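	/*
	 * The PR_FP_EXC_* bits tested above are the ones the task enabled
	 * with prctl(PR_SET_FPEXC, ...); for example
	 *	prctl(PR_SET_FPEXC, PR_FP_EXC_OVF | PR_FP_EXC_DIV);
	 * (an illustrative call, not something done in this file) requests
	 * SIGFPE on overflow and divide-by-zero while leaving the other
	 * conditions to the emulator below.
	 */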
	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();
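	/*
	 * The round exception is reported once the SPE instruction has
	 * completed, so NIP already points past it; back up so that
	 * speround_handler() can look at the instruction, and skip it
	 * again below if the emulation succeeds.
	 */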
	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif /* CONFIG_SPE */

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception: it simply disables further
 * watchdog interrupts by clearing TCR[WIE].  The symbol is weak so that
 * platforms can provide their own handler.
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}

#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
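/*
 * WARN_EMULATED_SETUP(altivec), for example, expands to
 *	.altivec = { .name = "altivec" },
 * so each entry of ppc_emulated below gets a counter tagged with its
 * own name.
 */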
struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
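
/*
 * With CONFIG_PPC_EMULATED_STATS enabled, the above creates an
 * emulated_instructions/ directory under the powerpc debugfs directory
 * (typically /sys/kernel/debug/powerpc, assuming debugfs is mounted at
 * /sys/kernel/debug) holding one counter file per entry of ppc_emulated
 * plus a "do_warn" switch; for example
 *	echo 1 > /sys/kernel/debug/powerpc/emulated_instructions/do_warn
 * (path depends on the debugfs mount point) enables the rate-limited
 * warnings printed by ppc_warn_emulated_print().
 */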