/* arch/powerpc/kernel/mce.c */
  1. /*
  2. * Machine check exception handling.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright 2013 IBM Corporation
  19. * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
  20. */
  21. #undef DEBUG
  22. #define pr_fmt(fmt) "mce: " fmt
  23. #include <linux/types.h>
  24. #include <linux/ptrace.h>
  25. #include <linux/percpu.h>
  26. #include <linux/export.h>
  27. #include <asm/mce.h>
/*
 * Per-cpu nesting depth of machine check events currently being handled;
 * mce_event[mce_nest_count - 1] is the most recent in-flight event.
 */
static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);
  33. static void mce_set_error_info(struct machine_check_event *mce,
  34. struct mce_error_info *mce_err)
  35. {
  36. mce->error_type = mce_err->error_type;
  37. switch (mce_err->error_type) {
  38. case MCE_ERROR_TYPE_UE:
  39. mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
  40. break;
  41. case MCE_ERROR_TYPE_SLB:
  42. mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
  43. break;
  44. case MCE_ERROR_TYPE_ERAT:
  45. mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
  46. break;
  47. case MCE_ERROR_TYPE_TLB:
  48. mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
  49. break;
  50. case MCE_ERROR_TYPE_UNKNOWN:
  51. default:
  52. break;
  53. }
  54. }
  55. /*
  56. * Decode and save high level MCE information into per cpu buffer which
  57. * is an array of machine_check_event structure.
  58. */
  59. void save_mce_event(struct pt_regs *regs, long handled,
  60. struct mce_error_info *mce_err,
  61. uint64_t addr)
  62. {
  63. uint64_t srr1;
  64. int index = __get_cpu_var(mce_nest_count)++;
  65. struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
  66. /*
  67. * Return if we don't have enough space to log mce event.
  68. * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
  69. * the check below will stop buffer overrun.
  70. */
  71. if (index >= MAX_MC_EVT)
  72. return;
  73. /* Populate generic machine check info */
  74. mce->version = MCE_V1;
  75. mce->srr0 = regs->nip;
  76. mce->srr1 = regs->msr;
  77. mce->gpr3 = regs->gpr[3];
  78. mce->in_use = 1;
  79. mce->initiator = MCE_INITIATOR_CPU;
  80. if (handled)
  81. mce->disposition = MCE_DISPOSITION_RECOVERED;
  82. else
  83. mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
  84. mce->severity = MCE_SEV_ERROR_SYNC;
  85. srr1 = regs->msr;
  86. /*
  87. * Populate the mce error_type and type-specific error_type.
  88. */
  89. mce_set_error_info(mce, mce_err);
  90. if (!addr)
  91. return;
  92. if (mce->error_type == MCE_ERROR_TYPE_TLB) {
  93. mce->u.tlb_error.effective_address_provided = true;
  94. mce->u.tlb_error.effective_address = addr;
  95. } else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
  96. mce->u.slb_error.effective_address_provided = true;
  97. mce->u.slb_error.effective_address = addr;
  98. } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
  99. mce->u.erat_error.effective_address_provided = true;
  100. mce->u.erat_error.effective_address = addr;
  101. } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
  102. mce->u.ue_error.effective_address_provided = true;
  103. mce->u.ue_error.effective_address = addr;
  104. }
  105. return;
  106. }
  107. /*
  108. * get_mce_event:
  109. * mce Pointer to machine_check_event structure to be filled.
  110. * release Flag to indicate whether to free the event slot or not.
  111. * 0 <= do not release the mce event. Caller will invoke
  112. * release_mce_event() once event has been consumed.
  113. * 1 <= release the slot.
  114. *
  115. * return 1 = success
  116. * 0 = failure
  117. *
  118. * get_mce_event() will be called by platform specific machine check
  119. * handle routine and in KVM.
  120. * When we call get_mce_event(), we are still in interrupt context and
  121. * preemption will not be scheduled until ret_from_expect() routine
  122. * is called.
  123. */
  124. int get_mce_event(struct machine_check_event *mce, bool release)
  125. {
  126. int index = __get_cpu_var(mce_nest_count) - 1;
  127. struct machine_check_event *mc_evt;
  128. int ret = 0;
  129. /* Sanity check */
  130. if (index < 0)
  131. return ret;
  132. /* Check if we have MCE info to process. */
  133. if (index < MAX_MC_EVT) {
  134. mc_evt = &__get_cpu_var(mce_event[index]);
  135. /* Copy the event structure and release the original */
  136. if (mce)
  137. *mce = *mc_evt;
  138. if (release)
  139. mc_evt->in_use = 0;
  140. ret = 1;
  141. }
  142. /* Decrement the count to free the slot. */
  143. if (release)
  144. __get_cpu_var(mce_nest_count)--;
  145. return ret;
  146. }
  147. void release_mce_event(void)
  148. {
  149. get_mce_event(NULL, true);
  150. }
  151. /*
  152. * Queue up the MCE event which then can be handled later.
  153. */
  154. void machine_check_queue_event(void)
  155. {
  156. int index;
  157. struct machine_check_event evt;
  158. if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
  159. return;
  160. index = __get_cpu_var(mce_queue_count)++;
  161. /* If queue is full, just return for now. */
  162. if (index >= MAX_MC_EVT) {
  163. __get_cpu_var(mce_queue_count)--;
  164. return;
  165. }
  166. __get_cpu_var(mce_event_queue[index]) = evt;
  167. }
  168. /*
  169. * process pending MCE event from the mce event queue. This function will be
  170. * called during syscall exit.
  171. */
  172. void machine_check_process_queued_event(void)
  173. {
  174. int index;
  175. preempt_disable();
  176. /*
  177. * For now just print it to console.
  178. * TODO: log this error event to FSP or nvram.
  179. */
  180. while (__get_cpu_var(mce_queue_count) > 0) {
  181. index = __get_cpu_var(mce_queue_count) - 1;
  182. machine_check_print_event_info(
  183. &__get_cpu_var(mce_event_queue[index]));
  184. __get_cpu_var(mce_queue_count)--;
  185. }
  186. preempt_enable();
  187. }
  188. void machine_check_print_event_info(struct machine_check_event *evt)
  189. {
  190. const char *level, *sevstr, *subtype;
  191. static const char *mc_ue_types[] = {
  192. "Indeterminate",
  193. "Instruction fetch",
  194. "Page table walk ifetch",
  195. "Load/Store",
  196. "Page table walk Load/Store",
  197. };
  198. static const char *mc_slb_types[] = {
  199. "Indeterminate",
  200. "Parity",
  201. "Multihit",
  202. };
  203. static const char *mc_erat_types[] = {
  204. "Indeterminate",
  205. "Parity",
  206. "Multihit",
  207. };
  208. static const char *mc_tlb_types[] = {
  209. "Indeterminate",
  210. "Parity",
  211. "Multihit",
  212. };
  213. /* Print things out */
  214. if (evt->version != MCE_V1) {
  215. pr_err("Machine Check Exception, Unknown event version %d !\n",
  216. evt->version);
  217. return;
  218. }
  219. switch (evt->severity) {
  220. case MCE_SEV_NO_ERROR:
  221. level = KERN_INFO;
  222. sevstr = "Harmless";
  223. break;
  224. case MCE_SEV_WARNING:
  225. level = KERN_WARNING;
  226. sevstr = "";
  227. break;
  228. case MCE_SEV_ERROR_SYNC:
  229. level = KERN_ERR;
  230. sevstr = "Severe";
  231. break;
  232. case MCE_SEV_FATAL:
  233. default:
  234. level = KERN_ERR;
  235. sevstr = "Fatal";
  236. break;
  237. }
  238. printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
  239. evt->disposition == MCE_DISPOSITION_RECOVERED ?
  240. "Recovered" : "[Not recovered");
  241. printk("%s Initiator: %s\n", level,
  242. evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown");
  243. switch (evt->error_type) {
  244. case MCE_ERROR_TYPE_UE:
  245. subtype = evt->u.ue_error.ue_error_type <
  246. ARRAY_SIZE(mc_ue_types) ?
  247. mc_ue_types[evt->u.ue_error.ue_error_type]
  248. : "Unknown";
  249. printk("%s Error type: UE [%s]\n", level, subtype);
  250. if (evt->u.ue_error.effective_address_provided)
  251. printk("%s Effective address: %016llx\n",
  252. level, evt->u.ue_error.effective_address);
  253. if (evt->u.ue_error.physical_address_provided)
  254. printk("%s Physial address: %016llx\n",
  255. level, evt->u.ue_error.physical_address);
  256. break;
  257. case MCE_ERROR_TYPE_SLB:
  258. subtype = evt->u.slb_error.slb_error_type <
  259. ARRAY_SIZE(mc_slb_types) ?
  260. mc_slb_types[evt->u.slb_error.slb_error_type]
  261. : "Unknown";
  262. printk("%s Error type: SLB [%s]\n", level, subtype);
  263. if (evt->u.slb_error.effective_address_provided)
  264. printk("%s Effective address: %016llx\n",
  265. level, evt->u.slb_error.effective_address);
  266. break;
  267. case MCE_ERROR_TYPE_ERAT:
  268. subtype = evt->u.erat_error.erat_error_type <
  269. ARRAY_SIZE(mc_erat_types) ?
  270. mc_erat_types[evt->u.erat_error.erat_error_type]
  271. : "Unknown";
  272. printk("%s Error type: ERAT [%s]\n", level, subtype);
  273. if (evt->u.erat_error.effective_address_provided)
  274. printk("%s Effective address: %016llx\n",
  275. level, evt->u.erat_error.effective_address);
  276. break;
  277. case MCE_ERROR_TYPE_TLB:
  278. subtype = evt->u.tlb_error.tlb_error_type <
  279. ARRAY_SIZE(mc_tlb_types) ?
  280. mc_tlb_types[evt->u.tlb_error.tlb_error_type]
  281. : "Unknown";
  282. printk("%s Error type: TLB [%s]\n", level, subtype);
  283. if (evt->u.tlb_error.effective_address_provided)
  284. printk("%s Effective address: %016llx\n",
  285. level, evt->u.tlb_error.effective_address);
  286. break;
  287. default:
  288. case MCE_ERROR_TYPE_UNKNOWN:
  289. printk("%s Error type: Unknown\n", level);
  290. break;
  291. }
  292. }