@@ -1895,18 +1895,25 @@ static void nvme_async_event_work(struct work_struct *work)
 	spin_unlock_irq(&ctrl->lock);
 }
 
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-		struct nvme_completion *cqe)
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+		union nvme_result *res)
 {
-	u16 status = le16_to_cpu(cqe->status) >> 1;
-	u32 result = le32_to_cpu(cqe->result.u32);
+	u32 result = le32_to_cpu(res->u32);
+	bool done = true;
 
-	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+	switch (le16_to_cpu(status) >> 1) {
+	case NVME_SC_SUCCESS:
+		done = false;
+		/*FALLTHRU*/
+	case NVME_SC_ABORT_REQ:
 		++ctrl->event_limit;
 		schedule_work(&ctrl->async_event_work);
+		break;
+	default:
+		break;
 	}
 
-	if (status != NVME_SC_SUCCESS)
+	if (done)
 		return;
 
 	switch (result & 0xff07) {
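
Callers change in step with the new prototype: instead of handing the whole CQE to the core, a transport now passes the raw little-endian status word and a pointer to the result union. Below is a minimal sketch of what the caller side looks like, modelled on the PCIe driver's completion loop in this series; the surrounding names (nvmeq, cqe, NVME_AQ_BLKMQ_DEPTH) are taken from that driver but the snippet is illustrative, not the verbatim source:

	/*
	 * Sketch of a transport completion path. Async event completions
	 * on the admin queue have no struct request behind them, so hand
	 * the status and result straight to the core with the new
	 * nvme_complete_async_event() signature.
	 */
	if (unlikely(nvmeq->qid == 0 &&
			cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe.status, &cqe.result);
		continue;
	}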