/*
 * MCE grading rules.
 * Copyright 2008, 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * Author: Andi Kleen
 */
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <asm/mce.h>
#include <linux/uaccess.h>

#include "mce-internal.h"
/*
 * Grade an mce by severity. In general the most severe ones are processed
 * first. Since there are quite a lot of combinations test the bits in a
 * table-driven way. The rules are simply processed in order, first
 * match wins.
 *
 * Note this is only used for machine check exceptions, the corrected
 * errors use much simpler rules. The exceptions still check for the corrected
 * errors, but only to leave them alone for the CMCI handler (except for
 * panic situations)
 */

/* Execution context the error was taken in; returned by error_context(). */
enum context { IN_KERNEL = 1, IN_USER = 2, IN_KERNEL_RECOV = 3 };
/* Whether a rule applies only with/without software error recovery (SER). */
enum ser { SER_REQUIRED = 1, NO_SER = 2 };
/* Whether a rule applies only in (or only outside) #MC exception context. */
enum exception { EXCP_CONTEXT = 1, NO_EXCP = 2 };

static struct severity {
	u64 mask;		/* MCi_STATUS bits tested by this rule */
	u64 result;		/* required value of (status & mask) */
	unsigned char sev;	/* MCE_*_SEVERITY grade returned on a match */
	unsigned char mcgmask;	/* MCG_STATUS bits tested (0 = don't care) */
	unsigned char mcgres;	/* required value of (mcgstatus & mcgmask) */
	unsigned char ser;	/* SER_REQUIRED / NO_SER filter, 0 = don't care */
	unsigned char context;	/* IN_KERNEL/IN_USER/... filter, 0 = don't care */
	unsigned char excp;	/* EXCP_CONTEXT / NO_EXCP filter, 0 = don't care */
	unsigned char covered;	/* set once this rule has matched (debugfs stat) */
	char *msg;		/* reason string handed back to the caller */
} severities[] = {
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
#define KERNEL		.context = IN_KERNEL
#define USER		.context = IN_USER
#define KERNEL_RECOV	.context = IN_KERNEL_RECOV
#define SER		.ser = SER_REQUIRED
#define NOSER		.ser = NO_SER
#define EXCP		.excp = EXCP_CONTEXT
#define NOEXCP		.excp = NO_EXCP
#define BITCLR(x)	.mask = x, .result = 0
#define BITSET(x)	.mask = x, .result = x
#define MCGMASK(x, y)	.mcgmask = x, .mcgres = y
#define MASK(x, y)	.mask = x, .result = y
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_AR (MCI_STATUS_UC|MCI_STATUS_AR)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define	MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)

	MCESEV(
		NO, "Invalid",
		BITCLR(MCI_STATUS_VAL)
		),
	MCESEV(
		NO, "Not enabled",
		EXCP, BITCLR(MCI_STATUS_EN)
		),
	MCESEV(
		PANIC, "Processor context corrupt",
		BITSET(MCI_STATUS_PCC)
		),
	/* When MCIP is not set something is very confused */
	MCESEV(
		PANIC, "MCIP not set in MCA handler",
		EXCP, MCGMASK(MCG_STATUS_MCIP, 0)
		),
	/* Neither restart nor error IP -- no chance to recover -> PANIC */
	MCESEV(
		PANIC, "Neither restart nor error IP",
		EXCP, MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0)
		),
	MCESEV(
		PANIC, "In kernel and no restart IP",
		EXCP, KERNEL, MCGMASK(MCG_STATUS_RIPV, 0)
		),
	MCESEV(
		PANIC, "In kernel and no restart IP",
		EXCP, KERNEL_RECOV, MCGMASK(MCG_STATUS_RIPV, 0)
		),
	MCESEV(
		DEFERRED, "Deferred error",
		NOSER, MASK(MCI_STATUS_UC|MCI_STATUS_DEFERRED|MCI_STATUS_POISON, MCI_STATUS_DEFERRED)
		),
	MCESEV(
		KEEP, "Corrected error",
		NOSER, BITCLR(MCI_STATUS_UC)
		),

	/*
	 * known AO MCACODs reported via MCE or CMC:
	 *
	 * SRAO could be signaled either via a machine check exception or
	 * CMCI with the corresponding bit S 1 or 0. So we don't need to
	 * check bit S for SRAO.
	 */
	MCESEV(
		AO, "Action optional: memory scrubbing error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB)
		),
	MCESEV(
		AO, "Action optional: last level cache writeback error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
		),

	/* ignore OVER for UCNA */
	MCESEV(
		UCNA, "Uncorrected no action required",
		SER, MASK(MCI_UC_SAR, MCI_STATUS_UC)
		),
	MCESEV(
		PANIC, "Illegal combination (UCNA with AR=1)",
		SER,
		MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
		),
	MCESEV(
		KEEP, "Non signalled machine check",
		SER, BITCLR(MCI_STATUS_S)
		),
	MCESEV(
		PANIC, "Action required with lost events",
		SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR)
		),

	/* known AR MCACODs: */
#ifdef	CONFIG_MEMORY_FAILURE
	MCESEV(
		KEEP, "Action required but unaffected thread is continuable",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR),
		MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV)
		),
	MCESEV(
		AR, "Action required: data load in error recoverable area of kernel",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
		KERNEL_RECOV
		),
	MCESEV(
		AR, "Action required: data load error in a user process",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
		USER
		),
	MCESEV(
		AR, "Action required: instruction fetch error in a user process",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
		USER
		),
#endif
	MCESEV(
		PANIC, "Action required: unknown MCACOD",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
		),

	MCESEV(
		SOME, "Action optional: unknown MCACOD",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
		),
	MCESEV(
		SOME, "Action optional with lost events",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S)
		),

	MCESEV(
		PANIC, "Overflowed uncorrected",
		BITSET(MCI_STATUS_OVER|MCI_STATUS_UC)
		),
	MCESEV(
		UC, "Uncorrected",
		BITSET(MCI_STATUS_UC)
		),
	MCESEV(
		SOME, "No match",
		BITSET(0)
		)	/* always matches. keep at end */
};
  180. #define mc_recoverable(mcg) (((mcg) & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) == \
  181. (MCG_STATUS_RIPV|MCG_STATUS_EIPV))
  182. /*
  183. * If mcgstatus indicated that ip/cs on the stack were
  184. * no good, then "m->cs" will be zero and we will have
  185. * to assume the worst case (IN_KERNEL) as we actually
  186. * have no idea what we were executing when the machine
  187. * check hit.
  188. * If we do have a good "m->cs" (or a faked one in the
  189. * case we were executing in VM86 mode) we can use it to
  190. * distinguish an exception taken in user from from one
  191. * taken in the kernel.
  192. */
  193. static int error_context(struct mce *m)
  194. {
  195. if ((m->cs & 3) == 3)
  196. return IN_USER;
  197. if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip))
  198. return IN_KERNEL_RECOV;
  199. return IN_KERNEL;
  200. }
  201. static int mce_severity_amd_smca(struct mce *m, enum context err_ctx)
  202. {
  203. u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
  204. u32 low, high;
  205. /*
  206. * We need to look at the following bits:
  207. * - "succor" bit (data poisoning support), and
  208. * - TCC bit (Task Context Corrupt)
  209. * in MCi_STATUS to determine error severity.
  210. */
  211. if (!mce_flags.succor)
  212. return MCE_PANIC_SEVERITY;
  213. if (rdmsr_safe(addr, &low, &high))
  214. return MCE_PANIC_SEVERITY;
  215. /* TCC (Task context corrupt). If set and if IN_KERNEL, panic. */
  216. if ((low & MCI_CONFIG_MCAX) &&
  217. (m->status & MCI_STATUS_TCC) &&
  218. (err_ctx == IN_KERNEL))
  219. return MCE_PANIC_SEVERITY;
  220. /* ...otherwise invoke hwpoison handler. */
  221. return MCE_AR_SEVERITY;
  222. }
  223. /*
  224. * See AMD Error Scope Hierarchy table in a newer BKDG. For example
  225. * 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features"
  226. */
  227. static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_excp)
  228. {
  229. enum context ctx = error_context(m);
  230. /* Processor Context Corrupt, no need to fumble too much, die! */
  231. if (m->status & MCI_STATUS_PCC)
  232. return MCE_PANIC_SEVERITY;
  233. if (m->status & MCI_STATUS_UC) {
  234. if (ctx == IN_KERNEL)
  235. return MCE_PANIC_SEVERITY;
  236. /*
  237. * On older systems where overflow_recov flag is not present, we
  238. * should simply panic if an error overflow occurs. If
  239. * overflow_recov flag is present and set, then software can try
  240. * to at least kill process to prolong system operation.
  241. */
  242. if (mce_flags.overflow_recov) {
  243. if (mce_flags.smca)
  244. return mce_severity_amd_smca(m, ctx);
  245. /* kill current process */
  246. return MCE_AR_SEVERITY;
  247. } else {
  248. /* at least one error was not logged */
  249. if (m->status & MCI_STATUS_OVER)
  250. return MCE_PANIC_SEVERITY;
  251. }
  252. /*
  253. * For any other case, return MCE_UC_SEVERITY so that we log the
  254. * error and exit #MC handler.
  255. */
  256. return MCE_UC_SEVERITY;
  257. }
  258. /*
  259. * deferred error: poll handler catches these and adds to mce_ring so
  260. * memory-failure can take recovery actions.
  261. */
  262. if (m->status & MCI_STATUS_DEFERRED)
  263. return MCE_DEFERRED_SEVERITY;
  264. /*
  265. * corrected error: poll handler catches these and passes responsibility
  266. * of decoding the error to EDAC
  267. */
  268. return MCE_KEEP_SEVERITY;
  269. }
/*
 * Grade an Intel machine check by walking the severities[] table in order;
 * the first rule whose filters all match decides the severity. The final
 * catch-all entry guarantees termination of the loop.
 */
static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_excp)
{
	enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP);
	enum context ctx = error_context(m);
	struct severity *s;

	for (s = severities;; s++) {
		/* MCi_STATUS bits must match the rule's mask/result pair. */
		if ((m->status & s->mask) != s->result)
			continue;
		/* Same for the tested MCG_STATUS bits (mcgmask 0 = don't care). */
		if ((m->mcgstatus & s->mcgmask) != s->mcgres)
			continue;
		/* Filter on whether the CPU supports software error recovery. */
		if (s->ser == SER_REQUIRED && !mca_cfg.ser)
			continue;
		if (s->ser == NO_SER && mca_cfg.ser)
			continue;
		/* Filter on execution context and exception context, if set. */
		if (s->context && ctx != s->context)
			continue;
		if (s->excp && excp != s->excp)
			continue;
		if (msg)
			*msg = s->msg;
		/* Record the hit for the severities-coverage debugfs file. */
		s->covered = 1;
		/* In-kernel uncorrected errors panic unless tolerant >= 1. */
		if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) {
			if (tolerant < 1)
				return MCE_PANIC_SEVERITY;
		}
		return s->sev;
	}
}
/*
 * Vendor-switchable grading hook; mcheck_vendor_init_severity() repoints
 * it for AMD. Default to mce_severity_intel.
 */
int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) =
    mce_severity_intel;
  301. void __init mcheck_vendor_init_severity(void)
  302. {
  303. if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
  304. mce_severity = mce_severity_amd;
  305. }
  306. #ifdef CONFIG_DEBUG_FS
  307. static void *s_start(struct seq_file *f, loff_t *pos)
  308. {
  309. if (*pos >= ARRAY_SIZE(severities))
  310. return NULL;
  311. return &severities[*pos];
  312. }
  313. static void *s_next(struct seq_file *f, void *data, loff_t *pos)
  314. {
  315. if (++(*pos) >= ARRAY_SIZE(severities))
  316. return NULL;
  317. return &severities[*pos];
  318. }
/* seq_file iterator teardown: nothing was acquired, so nothing to release. */
static void s_stop(struct seq_file *f, void *data)
{
}
  322. static int s_show(struct seq_file *f, void *data)
  323. {
  324. struct severity *ser = data;
  325. seq_printf(f, "%d\t%s\n", ser->covered, ser->msg);
  326. return 0;
  327. }
/* seq_file callbacks for iterating the severities[] table. */
static const struct seq_operations severities_seq_ops = {
	.start	= s_start,
	.next	= s_next,
	.stop	= s_stop,
	.show	= s_show,
};
/* Open handler: bind the seq_file iterator to this file. */
static int severities_coverage_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &severities_seq_ops);
}
  338. static ssize_t severities_coverage_write(struct file *file,
  339. const char __user *ubuf,
  340. size_t count, loff_t *ppos)
  341. {
  342. int i;
  343. for (i = 0; i < ARRAY_SIZE(severities); i++)
  344. severities[i].covered = 0;
  345. return count;
  346. }
/* File operations for <debugfs>/mce/severities-coverage. */
static const struct file_operations severities_coverage_fops = {
	.open		= severities_coverage_open,
	.release	= seq_release,
	.read		= seq_read,
	.write		= severities_coverage_write,
	.llseek		= seq_lseek,
};
  354. static int __init severities_debugfs_init(void)
  355. {
  356. struct dentry *dmce, *fsev;
  357. dmce = mce_get_debugfs_dir();
  358. if (!dmce)
  359. goto err_out;
  360. fsev = debugfs_create_file("severities-coverage", 0444, dmce, NULL,
  361. &severities_coverage_fops);
  362. if (!fsev)
  363. goto err_out;
  364. return 0;
  365. err_out:
  366. return -ENOMEM;
  367. }
  368. late_initcall(severities_debugfs_init);
  369. #endif /* CONFIG_DEBUG_FS */