@@ -554,13 +554,13 @@ out:
 	return ret;
 }
 
-static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
 	return pvrdma_page_dir_get_ptr(&qp->pdir,
 				       qp->sq.offset + n * qp->sq.wqe_size);
 }
 
-static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
 	return pvrdma_page_dir_get_ptr(&qp->pdir,
 				       qp->rq.offset + n * qp->rq.wqe_size);
@@ -598,9 +598,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned long flags;
 	struct pvrdma_sq_wqe_hdr *wqe_hdr;
 	struct pvrdma_sge *sge;
-	int i, index;
-	int nreq;
-	int ret;
+	int i, ret;
 
 	/*
 	 * In states lower than RTS, we can fail immediately. In other states,
@@ -613,9 +611,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
 
-	index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);
-	for (nreq = 0; wr; nreq++, wr = wr->next) {
-		unsigned int tail;
+	while (wr) {
+		unsigned int tail = 0;
 
 		if (unlikely(!pvrdma_idx_ring_has_space(
 				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
@@ -680,7 +677,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			}
 		}
 
-		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index);
+		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
 		memset(wqe_hdr, 0, sizeof(*wqe_hdr));
 		wqe_hdr->wr_id = wr->wr_id;
 		wqe_hdr->num_sge = wr->num_sge;
@@ -771,12 +768,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		/* Make sure wqe is written before index update */
 		smp_wmb();
 
-		index++;
-		if (unlikely(index >= qp->sq.wqe_cnt))
-			index = 0;
 		/* Update shared sq ring */
 		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
 				    qp->sq.wqe_cnt);
+
+		wr = wr->next;
 	}
 
 	ret = 0;
@@ -806,7 +802,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	struct pvrdma_qp *qp = to_vqp(ibqp);
 	struct pvrdma_rq_wqe_hdr *wqe_hdr;
 	struct pvrdma_sge *sge;
-	int index, nreq;
 	int ret = 0;
 	int i;
 
@@ -821,9 +816,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 	spin_lock_irqsave(&qp->rq.lock, flags);
 
-	index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt);
-	for (nreq = 0; wr; nreq++, wr = wr->next) {
-		unsigned int tail;
+	while (wr) {
+		unsigned int tail = 0;
 
 		if (unlikely(wr->num_sge > qp->rq.max_sg ||
 			     wr->num_sge < 0)) {
@@ -843,7 +837,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			goto out;
 		}
 
-		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index);
+		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
 		wqe_hdr->wr_id = wr->wr_id;
 		wqe_hdr->num_sge = wr->num_sge;
 		wqe_hdr->total_len = 0;
@@ -859,12 +853,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		/* Make sure wqe is written before index update */
 		smp_wmb();
 
-		index++;
-		if (unlikely(index >= qp->rq.wqe_cnt))
-			index = 0;
 		/* Update shared rq ring */
 		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
 				    qp->rq.wqe_cnt);
+
+		wr = wr->next;
 	}
 
 	spin_unlock_irqrestore(&qp->rq.lock, flags);
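
Note: the change above drops the locally tracked "index" and instead writes each
WQE to the "tail" slot that pvrdma_idx_ring_has_space() reports, with
pvrdma_idx_ring_inc() as the only place the producer index advances. What
follows is a minimal, self-contained sketch of that pattern; struct ring,
ring_has_space() and ring_inc() are simplified stand-ins for the driver's
struct pvrdma_ring helpers, not the driver's actual implementation (which also
reads the shared indices atomically and masks with a power-of-two wqe count).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {				/* simplified stand-in for struct pvrdma_ring */
	uint32_t prod_tail;		/* producer index, in [0, 2 * cnt) */
	uint32_t cons_head;		/* consumer index, in [0, 2 * cnt) */
};

/* Nonzero when a slot is free; *tail receives the slot to write.
 * Indices run over [0, 2 * cnt) so a full ring is distinct from an
 * empty one; cnt must be a power of two for the wrap arithmetic. */
static bool ring_has_space(const struct ring *r, uint32_t cnt, uint32_t *tail)
{
	uint32_t used = (r->prod_tail - r->cons_head) % (2 * cnt);

	if (used >= cnt)
		return false;		/* ring full */
	*tail = r->prod_tail % cnt;	/* slot the caller may write */
	return true;
}

/* Publish one written slot by advancing the producer index. */
static void ring_inc(struct ring *r, uint32_t cnt)
{
	r->prod_tail = (r->prod_tail + 1) % (2 * cnt);
}

int main(void)
{
	struct ring sq = { 0, 0 };
	uint32_t tail;
	int wqe;

	for (wqe = 0; wqe < 6; wqe++) {			/* try to post six WQEs */
		if (!ring_has_space(&sq, 4, &tail)) {	/* ring holds four entries */
			puts("ring full, would set *bad_wr and stop");
			break;
		}
		printf("wqe %d goes in slot %u\n", wqe, tail);
		ring_inc(&sq, 4);	/* publish only after the slot is written */
	}
	return 0;
}

Because the write slot and the published index come from the same ring state,
there is no second copy that can drift out of sync, which is the point of
replacing "index" with "tail" in the hunks above.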