/*
 * Machine check exception handling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */
#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>

#include <asm/machdep.h>
#include <asm/mce.h>
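
/*
 * MCE events are buffered in per-CPU arrays. mce_nest_count acts as a
 * stack depth into mce_event[], so a machine check taken while a previous
 * one is still being logged (up to MAX_MC_EVT deep) gets its own slot.
 * The two queues below hold copies of events whose processing must be
 * deferred out of the real-mode handler.
 */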
static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
					mce_ue_event_queue);

static void machine_check_process_queued_event(struct irq_work *work);
void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}
/*
 * Decode and save high level MCE information into per cpu buffer which
 * is an array of machine_check_event structure.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = __this_cpu_inc_return(mce_nest_count) - 1;
	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);

	/*
	 * Return if we don't have enough space to log mce event.
	 * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
	 * the check below will stop buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			machine_check_ue_event(mce);
		}
	}
	return;
}
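
/*
 * Illustrative call sequence from a platform handler (the surrounding
 * handler and the decoded values here are hypothetical, not part of
 * this file's API):
 *
 *	struct mce_error_info mce_err = {
 *		.error_type = MCE_ERROR_TYPE_SLB,
 *		.u.slb_error_type = MCE_SLB_ERROR_MULTIHIT,
 *		.severity = MCE_SEV_ERROR_SYNC,
 *		.initiator = MCE_INITIATOR_CPU,
 *	};
 *	save_mce_event(regs, handled, &mce_err, regs->nip, ea, ULONG_MAX);
 */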
/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once event has been consumed.
 *		1 <= release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform-specific machine check
 * handler routines and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = __this_cpu_read(mce_nest_count) - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = this_cpu_ptr(&mce_event[index]);
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		__this_cpu_dec(mce_nest_count);

	return ret;
}
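
/*
 * Usage sketch (hypothetical caller): peek at the current event without
 * freeing its slot, then release it once it has been consumed.
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, MCE_EVENT_DONTRELEASE)) {
 *		... inspect evt ...
 *		release_mce_event();
 *	}
 */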
void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
/*
 * Queue up the MCE UE event so that it can be processed later from
 * the workqueue.
 */
void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = __this_cpu_inc_return(mce_ue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_ue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));

	/* Queue work to process this event later. */
	schedule_work(&mce_ue_event_work);
}
/*
 * Queue up the MCE event which can then be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}
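
/*
 * Note: irq_work_queue() only marks the work pending and arms a
 * self-interrupt; machine_check_process_queued_event() then runs the
 * next time the irq_work list is processed, safely outside the
 * real-mode machine check handler.
 */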
/*
 * Process pending MCE UE events from the per-CPU queue. This function
 * runs in process context, from the workqueue item scheduled by
 * machine_check_ue_event().
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (__this_cpu_read(mce_ue_count) > 0) {
		index = __this_cpu_read(mce_ue_count) - 1;
		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but
		 * oh well!
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from where the uncorrectable error (UE) was generated\n");
		}
#endif
		__this_cpu_dec(mce_ue_count);
	}
}
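
/*
 * memory_failure() (from CONFIG_MEMORY_FAILURE) takes the bad page out
 * of service: it marks it hwpoisoned and unmaps it from, or kills, any
 * processes still using it. That work can sleep, which is why UE
 * handling is deferred to process context here.
 */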
/*
 * Process pending MCE events from the per-CPU event queue. This function
 * runs from irq_work context, queued by machine_check_queue_event().
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (__this_cpu_read(mce_queue_count) > 0) {
		index = __this_cpu_read(mce_queue_count) - 1;
		evt = this_cpu_ptr(&mce_event_queue[index]);
		machine_check_print_event_info(evt, false);
		__this_cpu_dec(mce_queue_count);
	}
}
void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode)
{
	const char *level, *sevstr, *subtype;
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "";
		break;
	case MCE_SEV_ERROR_SYNC:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
	       evt->disposition == MCE_DISPOSITION_RECOVERED ?
	       "Recovered" : "Not recovered");

	if (user_mode) {
		printk("%s  NIP: [%016llx] PID: %d Comm: %s\n", level,
		       evt->srr0, current->pid, current->comm);
	} else {
		printk("%s  NIP [%016llx]: %pS\n", level, evt->srr0,
		       (void *)evt->srr0);
	}

	printk("%s  Initiator: %s\n", level,
	       evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown");
	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		printk("%s  Error type: UE [%s]\n", level, subtype);
		if (evt->u.ue_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.ue_error.effective_address);
		if (evt->u.ue_error.physical_address_provided)
			printk("%s    Physical address:  %016llx\n",
			       level, evt->u.ue_error.physical_address);
		break;
	case MCE_ERROR_TYPE_SLB:
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		printk("%s  Error type: SLB [%s]\n", level, subtype);
		if (evt->u.slb_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.slb_error.effective_address);
		break;
	case MCE_ERROR_TYPE_ERAT:
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		printk("%s  Error type: ERAT [%s]\n", level, subtype);
		if (evt->u.erat_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.erat_error.effective_address);
		break;
	case MCE_ERROR_TYPE_TLB:
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		printk("%s  Error type: TLB [%s]\n", level, subtype);
		if (evt->u.tlb_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.tlb_error.effective_address);
		break;
	case MCE_ERROR_TYPE_USER:
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		printk("%s  Error type: User [%s]\n", level, subtype);
		if (evt->u.user_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.user_error.effective_address);
		break;
	case MCE_ERROR_TYPE_RA:
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		printk("%s  Error type: Real address [%s]\n", level, subtype);
		if (evt->u.ra_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.ra_error.effective_address);
		break;
	case MCE_ERROR_TYPE_LINK:
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		printk("%s  Error type: Link [%s]\n", level, subtype);
		if (evt->u.link_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.link_error.effective_address);
		break;
	default:
	case MCE_ERROR_TYPE_UNKNOWN:
		printk("%s  Error type: Unknown\n", level);
		break;
	}
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
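
/*
 * Example of the resulting console output (values hypothetical), pieced
 * together from the formats above:
 *
 *	Severe Machine check interrupt [Recovered]
 *	  NIP [c000000000123456]: some_function+0x26/0x100
 *	  Initiator: CPU
 *	  Error type: SLB [Multihit]
 *	    Effective address: 0000000010340000
 */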
/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain srr0 and srr1 respectively.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	/*
	 * See if platform is capable of handling machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);
	return handled;
}
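
/*
 * powernv, for example, points this hook at the CPU-specific real-mode
 * handlers in mce_power.c, which decode the error from SRR1/DSISR and
 * attempt recovery such as flushing the SLB or TLB.
 */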
/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;
static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);
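
/*
 * Illustrative device tree fragment (node names hypothetical; the
 * property and string are the ones checked above) that would select the
 * TM suspend escape function:
 *
 *	cpus {
 *		PowerPC,POWER9@0 {
 *			ibm,hmi-special-triggers = "bit17-tm-suspend-escape";
 *		};
 *	};
 */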
/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not a HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/*
	 * HMER is a write-AND register: the value written is ANDed with
	 * the current contents, so writing ~HMER_DEBUG_TRIG clears only
	 * that bit while leaving any other pending causes set.
	 */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * Now to avoid problems with soft-disable we
		 * only do the emulation if we are coming from
		 * host user space.
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;
		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled.
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}
/*
 * Return values:
 *	0 or 1 as returned by hmi_handle_debugtrig() when it recognised
 *	the HMI cause, otherwise 1 after the platform's early HMI
 *	handling and the timebase resync have run.
 */
long hmi_exception_realmode(struct pt_regs *regs)
{
	int ret;

	__this_cpu_inc(irq_stat.hmi_exceptions);

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}