@@ -45,6 +45,7 @@ static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
 					mce_ue_event_queue);
 
 static void machine_check_process_queued_event(struct irq_work *work);
+static void machine_check_ue_irq_work(struct irq_work *work);
 void machine_check_ue_event(struct machine_check_event *evt);
 static void machine_process_ue_event(struct work_struct *work);
 
@@ -52,6 +53,10 @@ static struct irq_work mce_event_process_work = {
 	.func = machine_check_process_queued_event,
 };
 
+static struct irq_work mce_ue_event_irq_work = {
+	.func = machine_check_ue_irq_work,
+};
+
 DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
 
 static void mce_set_error_info(struct machine_check_event *mce,
@@ -208,6 +213,10 @@ void release_mce_event(void)
 	get_mce_event(NULL, true);
 }
 
+static void machine_check_ue_irq_work(struct irq_work *work)
+{
+	schedule_work(&mce_ue_event_work);
+}
+
 /*
  * Queue up the MCE event which then can be handled later.
@@ -225,7 +234,7 @@ void machine_check_ue_event(struct machine_check_event *evt)
 	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
 
 	/* Queue work to process this event later. */
-	schedule_work(&mce_ue_event_work);
+	irq_work_queue(&mce_ue_event_irq_work);
 }
 
 /*