mce-severity.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406
  1. /*
  2. * MCE grading rules.
  3. * Copyright 2008, 2009 Intel Corporation.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License
  7. * as published by the Free Software Foundation; version 2
  8. * of the License.
  9. *
  10. * Author: Andi Kleen
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/seq_file.h>
  14. #include <linux/init.h>
  15. #include <linux/debugfs.h>
  16. #include <asm/mce.h>
  17. #include <linux/uaccess.h>
  18. #include "mce-internal.h"
  19. /*
  20. * Grade an mce by severity. In general the most severe ones are processed
  21. * first. Since there are quite a lot of combinations test the bits in a
  22. * table-driven way. The rules are simply processed in order, first
  23. * match wins.
  24. *
  25. * Note this is only used for machine check exceptions, the corrected
  26. * errors use much simpler rules. The exceptions still check for the corrected
  27. * errors, but only to leave them alone for the CMCI handler (except for
  28. * panic situations)
  29. */
/* Execution context when the error was found (0 in a rule = wildcard). */
enum context { IN_KERNEL = 1, IN_USER = 2, IN_KERNEL_RECOV = 3 };
/* Rule filter: does the rule apply with/without software error recovery (SER)? */
enum ser { SER_REQUIRED = 1, NO_SER = 2 };
/* Rule filter: grading from the #MC exception handler vs. not (see is_excp). */
enum exception { EXCP_CONTEXT = 1, NO_EXCP = 2 };
/*
 * One grading rule. An mce matches a rule when
 *   (status & mask) == result  and  (mcgstatus & mcgmask) == mcgres
 * and the ser/context/excp filters (0 = don't care) agree with the
 * machine configuration and fault context (see mce_severity_intel()).
 * Rules are tried in table order; the first match wins.
 */
static struct severity {
	u64 mask;		/* bits of mce->status to test */
	u64 result;		/* value the masked status bits must have */
	unsigned char sev;	/* MCE_*_SEVERITY returned on match */
	unsigned char mcgmask;	/* bits of mce->mcgstatus to test */
	unsigned char mcgres;	/* value the masked mcgstatus bits must have */
	unsigned char ser;	/* SER_REQUIRED/NO_SER filter, 0 = any */
	unsigned char context;	/* IN_KERNEL/IN_USER/IN_KERNEL_RECOV, 0 = any */
	unsigned char excp;	/* EXCP_CONTEXT/NO_EXCP filter, 0 = any */
	unsigned char covered;	/* rule has matched at least once (debugfs stat) */
	char *msg;		/* human-readable reason handed back via *msg */
} severities[] = {
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
#define KERNEL		.context = IN_KERNEL
#define USER		.context = IN_USER
#define KERNEL_RECOV	.context = IN_KERNEL_RECOV
#define SER		.ser = SER_REQUIRED
#define NOSER		.ser = NO_SER
#define EXCP		.excp = EXCP_CONTEXT
#define NOEXCP		.excp = NO_EXCP
#define BITCLR(x)	.mask = x, .result = 0
#define BITSET(x)	.mask = x, .result = x
#define MCGMASK(x, y)	.mcgmask = x, .mcgres = y
#define MASK(x, y)	.mask = x, .result = y
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
	MCESEV(
		NO, "Invalid",
		BITCLR(MCI_STATUS_VAL)
		),
	MCESEV(
		NO, "Not enabled",
		EXCP, BITCLR(MCI_STATUS_EN)
		),
	MCESEV(
		PANIC, "Processor context corrupt",
		BITSET(MCI_STATUS_PCC)
		),
	/* When MCIP is not set something is very confused */
	MCESEV(
		PANIC, "MCIP not set in MCA handler",
		EXCP, MCGMASK(MCG_STATUS_MCIP, 0)
		),
	/* Neither return nor error IP -- no chance to recover -> PANIC */
	MCESEV(
		PANIC, "Neither restart nor error IP",
		EXCP, MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0)
		),
	MCESEV(
		PANIC, "In kernel and no restart IP",
		EXCP, KERNEL, MCGMASK(MCG_STATUS_RIPV, 0)
		),
	MCESEV(
		PANIC, "In kernel and no restart IP",
		EXCP, KERNEL_RECOV, MCGMASK(MCG_STATUS_RIPV, 0)
		),
	MCESEV(
		DEFERRED, "Deferred error",
		NOSER, MASK(MCI_STATUS_UC|MCI_STATUS_DEFERRED|MCI_STATUS_POISON, MCI_STATUS_DEFERRED)
		),
	MCESEV(
		KEEP, "Corrected error",
		NOSER, BITCLR(MCI_STATUS_UC)
		),

	/* ignore OVER for UCNA */
	MCESEV(
		UCNA, "Uncorrected no action required",
		SER, MASK(MCI_UC_SAR, MCI_STATUS_UC)
		),
	MCESEV(
		PANIC, "Illegal combination (UCNA with AR=1)",
		SER,
		MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
		),
	MCESEV(
		KEEP, "Non signalled machine check",
		SER, BITCLR(MCI_STATUS_S)
		),
	MCESEV(
		PANIC, "Action required with lost events",
		SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR)
		),

	/* known AR MCACODs: */
#ifdef CONFIG_MEMORY_FAILURE
	MCESEV(
		KEEP, "Action required but unaffected thread is continuable",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR),
		MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV)
		),
	MCESEV(
		AR, "Action required: data load in error recoverable area of kernel",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
		KERNEL_RECOV
		),
	MCESEV(
		AR, "Action required: data load error in a user process",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
		USER
		),
	MCESEV(
		AR, "Action required: instruction fetch error in a user process",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
		USER
		),
#endif
	MCESEV(
		PANIC, "Action required: unknown MCACOD",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
		),

	/* known AO MCACODs: */
	MCESEV(
		AO, "Action optional: memory scrubbing error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD_SCRUBMSK, MCI_UC_S|MCACOD_SCRUB)
		),
	MCESEV(
		AO, "Action optional: last level cache writeback error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|MCACOD_L3WB)
		),
	MCESEV(
		SOME, "Action optional: unknown MCACOD",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
		),
	MCESEV(
		SOME, "Action optional with lost events",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S)
		),

	MCESEV(
		PANIC, "Overflowed uncorrected",
		BITSET(MCI_STATUS_OVER|MCI_STATUS_UC)
		),
	MCESEV(
		UC, "Uncorrected",
		BITSET(MCI_STATUS_UC)
		),
	MCESEV(
		SOME, "No match",
		BITSET(0)
		)	/* always matches. keep at end */
};
/* Both RIPV and EIPV must be set for the error IP to be trustworthy. */
#define mc_recoverable(mcg) (((mcg) & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) == \
			     (MCG_STATUS_RIPV|MCG_STATUS_EIPV))

/*
 * If mcgstatus indicated that ip/cs on the stack were
 * no good, then "m->cs" will be zero and we will have
 * to assume the worst case (IN_KERNEL) as we actually
 * have no idea what we were executing when the machine
 * check hit.
 * If we do have a good "m->cs" (or a faked one in the
 * case we were executing in VM86 mode) we can use it to
 * distinguish an exception taken in user from one
 * taken in the kernel.
 */
static int error_context(struct mce *m)
{
	/* CPL 3 in the saved CS selector: the machine check hit user mode. */
	if ((m->cs & 3) == 3)
		return IN_USER;
	/*
	 * Kernel context is recoverable only when the saved IP is valid
	 * and covered by an exception-table fixup entry.
	 */
	if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip))
		return IN_KERNEL_RECOV;
	return IN_KERNEL;
}
  194. static int mce_severity_amd_smca(struct mce *m, int err_ctx)
  195. {
  196. u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
  197. u32 low, high;
  198. /*
  199. * We need to look at the following bits:
  200. * - "succor" bit (data poisoning support), and
  201. * - TCC bit (Task Context Corrupt)
  202. * in MCi_STATUS to determine error severity.
  203. */
  204. if (!mce_flags.succor)
  205. return MCE_PANIC_SEVERITY;
  206. if (rdmsr_safe(addr, &low, &high))
  207. return MCE_PANIC_SEVERITY;
  208. /* TCC (Task context corrupt). If set and if IN_KERNEL, panic. */
  209. if ((low & MCI_CONFIG_MCAX) &&
  210. (m->status & MCI_STATUS_TCC) &&
  211. (err_ctx == IN_KERNEL))
  212. return MCE_PANIC_SEVERITY;
  213. /* ...otherwise invoke hwpoison handler. */
  214. return MCE_AR_SEVERITY;
  215. }
  216. /*
  217. * See AMD Error Scope Hierarchy table in a newer BKDG. For example
  218. * 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features"
  219. */
  220. static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_excp)
  221. {
  222. enum context ctx = error_context(m);
  223. /* Processor Context Corrupt, no need to fumble too much, die! */
  224. if (m->status & MCI_STATUS_PCC)
  225. return MCE_PANIC_SEVERITY;
  226. if (m->status & MCI_STATUS_UC) {
  227. /*
  228. * On older systems where overflow_recov flag is not present, we
  229. * should simply panic if an error overflow occurs. If
  230. * overflow_recov flag is present and set, then software can try
  231. * to at least kill process to prolong system operation.
  232. */
  233. if (mce_flags.overflow_recov) {
  234. if (mce_flags.smca)
  235. return mce_severity_amd_smca(m, ctx);
  236. /* software can try to contain */
  237. if (!(m->mcgstatus & MCG_STATUS_RIPV) && (ctx == IN_KERNEL))
  238. return MCE_PANIC_SEVERITY;
  239. /* kill current process */
  240. return MCE_AR_SEVERITY;
  241. } else {
  242. /* at least one error was not logged */
  243. if (m->status & MCI_STATUS_OVER)
  244. return MCE_PANIC_SEVERITY;
  245. }
  246. /*
  247. * For any other case, return MCE_UC_SEVERITY so that we log the
  248. * error and exit #MC handler.
  249. */
  250. return MCE_UC_SEVERITY;
  251. }
  252. /*
  253. * deferred error: poll handler catches these and adds to mce_ring so
  254. * memory-failure can take recovery actions.
  255. */
  256. if (m->status & MCI_STATUS_DEFERRED)
  257. return MCE_DEFERRED_SEVERITY;
  258. /*
  259. * corrected error: poll handler catches these and passes responsibility
  260. * of decoding the error to EDAC
  261. */
  262. return MCE_KEEP_SEVERITY;
  263. }
  264. static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_excp)
  265. {
  266. enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP);
  267. enum context ctx = error_context(m);
  268. struct severity *s;
  269. for (s = severities;; s++) {
  270. if ((m->status & s->mask) != s->result)
  271. continue;
  272. if ((m->mcgstatus & s->mcgmask) != s->mcgres)
  273. continue;
  274. if (s->ser == SER_REQUIRED && !mca_cfg.ser)
  275. continue;
  276. if (s->ser == NO_SER && mca_cfg.ser)
  277. continue;
  278. if (s->context && ctx != s->context)
  279. continue;
  280. if (s->excp && excp != s->excp)
  281. continue;
  282. if (msg)
  283. *msg = s->msg;
  284. s->covered = 1;
  285. if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) {
  286. if (tolerant < 1)
  287. return MCE_PANIC_SEVERITY;
  288. }
  289. return s->sev;
  290. }
  291. }
/*
 * Vendor dispatch hook: defaults to the Intel grader, switched to the
 * AMD one by mcheck_vendor_init_severity() on AMD CPUs.
 */
int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) =
	mce_severity_intel;
/* Pick the vendor-specific severity grader at boot. */
void __init mcheck_vendor_init_severity(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		mce_severity = mce_severity_amd;
}
  300. #ifdef CONFIG_DEBUG_FS
  301. static void *s_start(struct seq_file *f, loff_t *pos)
  302. {
  303. if (*pos >= ARRAY_SIZE(severities))
  304. return NULL;
  305. return &severities[*pos];
  306. }
  307. static void *s_next(struct seq_file *f, void *data, loff_t *pos)
  308. {
  309. if (++(*pos) >= ARRAY_SIZE(severities))
  310. return NULL;
  311. return &severities[*pos];
  312. }
/* seq_file stop: nothing to release. */
static void s_stop(struct seq_file *f, void *data)
{
}
  316. static int s_show(struct seq_file *f, void *data)
  317. {
  318. struct severity *ser = data;
  319. seq_printf(f, "%d\t%s\n", ser->covered, ser->msg);
  320. return 0;
  321. }
/* Iterator over severities[], one rule per output line. */
static const struct seq_operations severities_seq_ops = {
	.start	= s_start,
	.next	= s_next,
	.stop	= s_stop,
	.show	= s_show,
};
/* debugfs open: attach the seq_file iterator to this file. */
static int severities_coverage_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &severities_seq_ops);
}
  332. static ssize_t severities_coverage_write(struct file *file,
  333. const char __user *ubuf,
  334. size_t count, loff_t *ppos)
  335. {
  336. int i;
  337. for (i = 0; i < ARRAY_SIZE(severities); i++)
  338. severities[i].covered = 0;
  339. return count;
  340. }
/*
 * "severities-coverage": reading lists every rule with a flag showing
 * whether it has ever matched; writing anything resets all the flags.
 */
static const struct file_operations severities_coverage_fops = {
	.open		= severities_coverage_open,
	.release	= seq_release,
	.read		= seq_read,
	.write		= severities_coverage_write,
	.llseek		= seq_lseek,
};
  348. static int __init severities_debugfs_init(void)
  349. {
  350. struct dentry *dmce, *fsev;
  351. dmce = mce_get_debugfs_dir();
  352. if (!dmce)
  353. goto err_out;
  354. fsev = debugfs_create_file("severities-coverage", 0444, dmce, NULL,
  355. &severities_coverage_fops);
  356. if (!fsev)
  357. goto err_out;
  358. return 0;
  359. err_out:
  360. return -ENOMEM;
  361. }
  362. late_initcall(severities_debugfs_init);
  363. #endif /* CONFIG_DEBUG_FS */