
/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#include <asm/stacktrace.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

static const char *signame(int signr)
{
	switch (signr) {
	case SIGBUS:	return "bus error";
	case SIGFPE:	return "floating point exception";
	case SIGILL:	return "illegal instruction";
	case SIGSEGV:	return "segfault";
	case SIGTRAP:	return "unhandled trap";
	}

	return "unknown signal";
}

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate, there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops ||
			!current->pid || is_global_init(current))
		return true;

	return false;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

extern void panic_flush_kmsg_start(void)
{
	/*
	 * These are mostly taken from kernel/panic.c, but try to do
	 * relatively minimal work. Don't use delay functions (TB may
	 * be broken), don't crash dump (need to set a firmware log),
	 * don't run notifiers. We do want to get some information to
	 * the Linux console.
	 */
	console_verbose();
	bust_spinlocks(1);
}

extern void panic_flush_kmsg_end(void)
{
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);
	bust_spinlocks(0);
	debug_locks_off();
	console_flush_on_panic();
}

static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);
	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) == 0x100)
		return;

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
		printk("LE ");
	else
		printk("BE ");

	if (IS_ENABLED(CONFIG_PREEMPT))
		pr_cont("PREEMPT ");

	if (IS_ENABLED(CONFIG_SMP))
		pr_cont("SMP NR_CPUS=%d ", NR_CPUS);

	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC ");

	if (IS_ENABLED(CONFIG_NUMA))
		pr_cont("NUMA ");

	pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;
	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) != 0x100) {
		if (debugger(regs))
			return;
	}

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);

void user_single_step_report(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip, current);
}

static void show_signal_msg(int signr, struct pt_regs *regs, int code,
			    unsigned long addr)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!show_unhandled_signals)
		return;

	if (!unhandled_signal(current, signr))
		return;

	if (!__ratelimit(&rs))
		return;

	pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
		current->comm, current->pid, signame(signr), signr,
		addr, regs->nip, regs->link, code);

	print_vma_addr(KERN_CONT " in ", regs->nip);

	pr_cont("\n");

	show_user_instructions(regs);
}

static bool exception_common(int signr, struct pt_regs *regs, int code,
			      unsigned long addr)
{
	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return false;
	}

	show_signal_msg(signr, regs, code, addr);

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;

	/*
	 * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
	 * to capture the content, if the task gets killed.
	 */
	thread_pkey_regs_save(&current->thread);

	return true;
}

void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
{
	if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
		return;

	force_sig_pkuerr((void __user *) addr, key);
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	if (!exception_common(signr, regs, code, addr))
		return;

	force_sig_fault(signr, code, (void __user *)addr, current);
}

void system_reset_exception(struct pt_regs *regs)
{
	/*
	 * Avoid crashes in case of nested NMI exceptions. Recoverability
	 * is determined by RI and in_nmi
	 */
	bool nested = in_nmi();

	if (!nested)
		nmi_enter();

	__this_cpu_inc(irq_stat.sreset_irqs);
	/* See if any machine-dependent calls can handle it */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	die("System Reset", regs, SIGABRT);

	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		nmi_panic(regs, "Unrecoverable nested System Reset");
#endif
	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable System Reset");

	if (!nested)
		nmi_exit();

	/* What should we do here? We could issue a shutdown or hard reset. */
}

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
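/*
 * For reference, the walk-back below assumes the faulting NIP sits
 * somewhere in a sequence roughly like the one the PPC32 I/O accessors
 * emit (an illustration of the pattern, not a quote of the accessor
 * code):
 *
 *	lbz	rD,0(rA)	# the actual I/O load
 *	twi	0,rD,0		# trap word, forces the load to complete
 *	isync
 *	nop
 *
 * or, for a store, the store followed by a sync.  Only the
 * sync/twi/isync/nop pattern matters to the logic that steps nip
 * backwards to find the offending load or store.
 */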
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
#define clear_br_trace(regs)	do {} while(0)
#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#define clear_br_trace(regs)	((regs)->msr &= ~MSR_BE)
#endif

#if defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		pr_cont("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		pr_cont("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		pr_cont("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		pr_cont("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		pr_cont("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		pr_cont("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		pr_cont("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		pr_cont("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		pr_cont("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		pr_cont("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		pr_cont("Machine Check %s Address: %#llx\n",
			reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		pr_cont("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		pr_cont("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		pr_cont("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		pr_cont("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		pr_cont("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		pr_cont("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		pr_cont("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		pr_cont("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		pr_cont("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		pr_cont("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		pr_cont("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		pr_cont("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		pr_cont("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		pr_cont("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		pr_cont("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		pr_cont("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = regs->msr;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		pr_cont("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		pr_cont("Transfer error ack signal\n");
		break;
	case 0x20000:
		pr_cont("Data parity error signal\n");
		break;
	case 0x10000:
		pr_cont("Address parity error signal\n");
		break;
	case 0x20000000:
		pr_cont("L1 Data Cache error\n");
		break;
	case 0x40000000:
		pr_cont("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		pr_cont("L2 data cache parity error\n");
		break;
	default:
		pr_cont("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	bool nested = in_nmi();

	if (!nested)
		nmi_enter();

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
	/* See if any machine-dependent calls can handle it. In theory, we
	 * would want to call the CPU-specific handler first, and call the
	 * ppc_md one only if the CPU one returns a positive number.
	 * However, there is existing code that assumes the board gets a
	 * first chance, so let's keep it that way for now and fix things
	 * later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable Machine check");

	if (!nested)
		nmi_exit();

	die("Machine check", regs, SIGBUS);
	return;

bail:
	if (!nested)
		nmi_exit();
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

#ifdef CONFIG_VSX
static void p9_hmi_special_emu(struct pt_regs *regs)
{
	unsigned int ra, rb, t, i, sel, instr, rc;
	const void __user *addr;
	u8 vbuf[16], *vdst;
	unsigned long ea, msr, msr_mask;
	bool swap;

	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
		return;

	/*
	 * lxvb16x	opcode: 0x7c0006d8
	 * lxvd2x	opcode: 0x7c000698
	 * lxvh8x	opcode: 0x7c000658
	 * lxvw4x	opcode: 0x7c000618
	 */
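	/*
	 * The mask 0xfc00073e clears the two size-select bits (re-derived
	 * below as sel = (instr >> 6) & 3) and the TX bit (instr & 1,
	 * which selects VSRs 32-63), so a single compare against the
	 * lxvw4x opcode matches all four vector loads listed above.
	 */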
	if ((instr & 0xfc00073e) != 0x7c000618) {
		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
			 " instr=%08x\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr);
		return;
	}

	/* Grab vector registers into the task struct */
	msr = regs->msr; /* Grab msr before we flush the bits */
	flush_vsx_to_thread(current);
	enable_kernel_altivec();
	/*
	 * Is userspace running with a different endian (this is rare but
	 * not impossible)?
	 */
	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);

	/* Decode the instruction */
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	t = (instr >> 21) & 0x1f;
	if (instr & 1)
		vdst = (u8 *)&current->thread.vr_state.vr[t];
	else
		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];

	/* Grab the vector address */
	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
	if (is_32bit_task())
		ea &= 0xfffffffful;
	addr = (__force const void __user *)ea;

	/* Check it */
	if (!access_ok(VERIFY_READ, addr, 16)) {
		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	/* Read the vector */
	rc = 0;
	if ((unsigned long)addr & 0xfUL)
		/* unaligned case */
		rc = __copy_from_user_inatomic(vbuf, addr, 16);
	else
		__get_user_atomic_128_aligned(vbuf, addr, rc);
	if (rc) {
		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
		 " instr=%08x addr=%016lx\n",
		 smp_processor_id(), current->comm, current->pid, regs->nip,
		 instr, (unsigned long) addr);

	/* Grab instruction "selector" */
	sel = (instr >> 6) & 3;

	/*
	 * Check to make sure the facility is actually enabled. This
	 * could happen if we get a false positive hit.
	 *
	 * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
	 */
	msr_mask = MSR_VSX;
	if ((sel & 1) && (instr & 1))	/* lxvh8x & lxvb16x + VSR >= 32 */
		msr_mask = MSR_VEC;
	if (!(msr & msr_mask)) {
		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
			 " instr=%08x msr:%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, msr);
		return;
	}

	/* Do logging here before we modify sel based on endian */
	switch (sel) {
	case 0:	/* lxvw4x */
		PPC_WARN_EMULATED(lxvw4x, regs);
		break;
	case 1: /* lxvh8x */
		PPC_WARN_EMULATED(lxvh8x, regs);
		break;
	case 2: /* lxvd2x */
		PPC_WARN_EMULATED(lxvd2x, regs);
		break;
	case 3: /* lxvb16x */
		PPC_WARN_EMULATED(lxvb16x, regs);
		break;
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * An LE kernel stores the vector in the task struct as an LE
	 * byte array (effectively swapping both the components and
	 * the content of the components). Those instructions expect
	 * the components to remain in ascending address order, so we
	 * swap them back.
	 *
	 * If we are running a BE user space, the expectation is that
	 * of a simple memcpy, so forcing the emulation to look like
	 * a lxvb16x should do the trick.
	 */
	if (swap)
		sel = 3;

	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
		break;
	case 3: /* lxvb16x */
		for (i = 0; i < 16; i++)
			vdst[i] = vbuf[15-i];
		break;
	}
#else /* __LITTLE_ENDIAN__ */
	/* On a big endian kernel, a BE userspace only needs a memcpy */
	if (!swap)
		sel = 3;

	/* Otherwise, we need to swap the content of the components */
	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
		break;
	case 3: /* lxvb16x */
		memcpy(vdst, vbuf, 16);
		break;
	}
#endif /* !__LITTLE_ENDIAN__ */

	/* Go to next instruction */
	regs->nip += 4;
}
#endif /* CONFIG_VSX */

void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_VSX
	/* Real mode flagged P9 special emu is needed */
	if (local_paca->hmi_p9_special_emu) {
		local_paca->hmi_p9_special_emu = 0;

		/*
		 * We don't want to take page faults while doing the
		 * emulation, we just replay the instruction if necessary.
		 */
		pagefault_disable();
		p9_hmi_special_emu(regs);
		pagefault_enable();
	}
#endif /* CONFIG_VSX */

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_UNK, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, TRAP_UNK, 0);
}

void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);
	clear_br_trace(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala. -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}
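
/*
 * Map FPSCR exception state to a SIGFPE si_code.  Each FPE_* value
 * below is reported only when both the enable bit (e.g. FPSCR_ZE) and
 * the corresponding sticky status bit (e.g. FPSCR_ZX) are set; for
 * example, a divide-by-zero taken with zero-divide exceptions enabled
 * maps to FPE_FLTDIV, and anything unrecognised falls back to
 * FPE_FLTUNK.
 */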
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = FPE_FLTUNK;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support. Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault. Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits. In this case, matching lots of
 * bits is faster and easier.
 *
 */
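
/*
 * A worked example of the string-instruction semantics implemented
 * below (illustrative, not part of the original source): "lswi r5,r4,8"
 * loads 8 bytes starting at the address in r4, placing the first four
 * bytes in r5 and the next four in r6, always in big-endian byte order
 * within each register.  An NB of 0 means 32 bytes, and the destination
 * register number wraps from r31 back to r0.
 */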
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
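
/*
 * popcntb puts the population count of each byte of GPR[RS] into the
 * corresponding byte of GPR[RA].  The emulation below uses the classic
 * SWAR reduction: fold to per-2-bit counts, then per-4-bit counts, then
 * sum neighbouring nibbles and mask with 0x0f0f...  This is safe
 * because a byte holds at most 8 set bits, so no field can overflow
 * into its neighbour.
 */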
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
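
/*
 * isel RT,RA,RB,BC selects between two GPRs on a condition-register
 * bit: RT = CR[BC] ? (RA ? GPR[RA] : 0) : GPR[RB].  For example,
 * "isel r3,r4,r5,0" sets r3 to r4 when CR0's LT bit is set, and to r5
 * otherwise.
 */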
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction. This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn. */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;

		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);
	/* We can now get here via an FP Unavailable exception if the core
	 * has no FPU; in that case the reason flags will be 0. */
	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 *
		 * If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist. We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%lx)\n", regs->nip, regs->msr);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif
	/*
	 * If we took the program check in the kernel, skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions,
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0. In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a VSX instruction,
		   but this kernel doesn't support VSX. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}

void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_MSGP_LG] = "MSGP",
		[FSCR_SCV_LG] = "SCV",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (TRAP(regs) == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);
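	/* The interrupt cause field sits in the top byte of the (H)FSCR,
	 * so the shift below extracts the FSCR_*_LG number of the facility
	 * that triggered this exception. */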
	status = value >> 56;
	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We should not have taken this interrupt in kernel mode */
	if (!user_mode(regs)) {
		pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
			 facility, status, regs->nip);
		die("Unexpected facility unavailable exception", regs, SIGABRT);
	}

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (status == FSCR_DSCR_LG) {
		/*
		 * The user is accessing the DSCR through the problem-state
		 * only SPR number (0x03), via either an mfspr or an mtspr
		 * instruction.  For a write attempt (mtspr) we set the
		 * inherit bit and also set the FSCR DSCR bit, so future
		 * reads and writes go directly to the register.  For a
		 * read attempt (mfspr) we just emulate the instruction:
		 * this path keeps emulating every mfspr until the user has
		 * attempted at least one mtspr, matching the behaviour of
		 * accesses through the privileged SPR number (0x11), which
		 * are emulated via the illegal-instruction exception.  We
		 * always leave the HFSCR DSCR bit set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}
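
		/*
		 * The PPC_INST_*_DSCR_USER masks below match m[tf]spr
		 * encodings that name the user DSCR (SPR 0x03); bits 21-25
		 * of the instruction word hold the RS/RT GPR number,
		 * extracted into 'rd'.
		 */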
		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs->nip += 4;
			emulate_single_step(regs);
		}
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either
		 * firmware told us not to do TM, or the kernel is not
		 * built with TM support.
		 *
		 * If both of those things are true, then userspace can
		 * spam the console by triggering the printk() below just
		 * by continually doing tbegin (or any TM instruction).
		 * So in that case just send the process a SIGILL
		 * immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
			   hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	/*
	 * tm_reclaim_current() first saved the bogus (lazy) FPRs out to
	 * ckfp_state; tm_reclaim_thread() then overwrote ckfp_state with
	 * thr->fp_state.
	 *
	 * At this point, ck{fp,vr}_state contains the exact values we want to
	 * recheckpoint.
	 */

	/* Enable FP for the task: */
	current->thread.load_fp = 1;

	/*
	 * Recheckpoint the ckpt and ck{fp,vr}_state registers.
	 */
	tm_recheckpoint(&current->thread);
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */
	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	current->thread.load_vec = 1;
	tm_recheckpoint(&current->thread);
	current->thread.used_vr = 1;
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */
	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	current->thread.load_vec = 1;
	current->thread.load_fp = 1;
	tm_recheckpoint(&current->thread);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
{
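	/* Account the interrupt per CPU, then dispatch to whichever perf
	   handler is currently installed (perf_irq). */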
	__this_cpu_inc(irq_stat.pmu_irqs);
	perf_irq(regs);
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the event flags
	 * and send a trap to the handler.
	 */
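	/*
	 * DBSR_DAC* flag data address compare matches and DBSR_IAC*
	 * instruction address compare matches; each arm below clears the
	 * matched condition and reports the trigger address to the
	 * debugger via do_send_trap().
	 */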
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, 5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, 6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, 1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, 2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, 3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, 4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
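		/* 0x10000 is the VSCR[NJ] (non-Java mode) bit; setting it
		   makes the hardware flush denormals itself rather than
		   raising further assist exceptions for them. */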
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = FPE_FLTUNK;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();
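
	/* The interrupt leaves NIP pointing past the offending
	   instruction; back up so speround_handler() can decode it and
	   fix up the rounded result. */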
	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
		return;
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
		 regs->trap, regs->nip, regs->msr);
	die("Unrecoverable exception", regs, SIGABRT);
}
NOKPROBE_SYMBOL(unrecoverable_exception);

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
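	/* Clearing TCR[WIE] masks further watchdog interrupts; the system
	   is then typically left to the watchdog's final timeout action
	   (a reset), matching the "spins until a reboot" note above. */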
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}
NOKPROBE_SYMBOL(kernel_bad_stack);

void __init trap_init(void)
{
}

#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
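
/* The stringize operator (#type) gives each counter a .name equal to its
 * field name, so the debugfs files created below are named after the
 * instruction types they count. */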
struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
	WARN_EMULATED_SETUP(lxvw4x),
	WARN_EMULATED_SETUP(lxvh8x),
	WARN_EMULATED_SETUP(lxvd2x),
	WARN_EMULATED_SETUP(lxvb16x),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
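
	/*
	 * Build <debugfs>/powerpc/emulated_instructions/ containing a
	 * "do_warn" switch plus one u32 counter file per entry in
	 * ppc_emulated; treating the struct as a flat array works because
	 * it holds nothing but ppc_emulated_entry fields.
	 */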
	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", 0644, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, 0644, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */