|
@@ -915,10 +915,10 @@ static void nvme_pci_complete_rq(struct request *req)
 }
 
 /* We read the CQE phase first to check if the rest of the entry is valid */
-static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
-		u16 phase)
+static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
 {
-	return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
+	return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
+			nvmeq->cq_phase;
 }
 
 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
|
|
@@ -965,7 +965,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
 static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
 		struct nvme_completion *cqe)
 {
-	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
+	if (nvme_cqe_pending(nvmeq)) {
 		*cqe = nvmeq->cqes[nvmeq->cq_head];
 
 		if (++nvmeq->cq_head == nvmeq->q_depth) {
|
|
@@ -1006,7 +1006,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
 static irqreturn_t nvme_irq_check(int irq, void *data)
 {
 	struct nvme_queue *nvmeq = data;
-	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+	if (nvme_cqe_pending(nvmeq))
 		return IRQ_WAKE_THREAD;
 	return IRQ_NONE;
 }
|
|
@@ -1016,7 +1016,7 @@ static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 	struct nvme_completion cqe;
 	int found = 0, consumed = 0;
 
-	if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+	if (!nvme_cqe_pending(nvmeq))
 		return 0;
 
 	spin_lock_irq(&nvmeq->q_lock);
|