@@ -2590,7 +2590,7 @@ static void nvme_async_event_work(struct work_struct *work)
 		container_of(work, struct nvme_ctrl, async_event_work);
 
 	spin_lock_irq(&ctrl->lock);
-	while (ctrl->event_limit > 0) {
+	while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
 		int aer_idx = --ctrl->event_limit;
 
 		spin_unlock_irq(&ctrl->lock);
@@ -2677,7 +2677,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		/*FALLTHRU*/
 	case NVME_SC_ABORT_REQ:
 		++ctrl->event_limit;
-		queue_work(nvme_wq, &ctrl->async_event_work);
+		if (ctrl->state == NVME_CTRL_LIVE)
+			schedule_work(&ctrl->async_event_work);
 		break;
 	default:
 		break;
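
For readability, here is a sketch of how nvme_async_event_work() reads with the first hunk applied. Only the while-condition change is part of this patch; the loop body (the ctrl->ops->submit_async_event() call) is not shown in the hunk and is assumed here purely for illustration.

/*
 * Sketch of nvme_async_event_work() after the change above.
 * The submit_async_event() call inside the loop is an assumption
 * for illustration; it is not part of the hunk in this patch.
 */
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	spin_lock_irq(&ctrl->lock);
	/* Stop issuing AENs once the controller leaves the LIVE state. */
	while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
		int aer_idx = --ctrl->event_limit;

		spin_unlock_irq(&ctrl->lock);
		ctrl->ops->submit_async_event(ctrl, aer_idx);	/* assumed loop body */
		spin_lock_irq(&ctrl->lock);
	}
	spin_unlock_irq(&ctrl->lock);
}

Together with the second hunk, the state check ensures that a completing AEN on a controller that is resetting or being torn down neither re-arms the work item nor lets an already-queued work item keep submitting new async event commands.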