@@ -315,7 +315,7 @@ static void advance_oldest_read(struct t4_wq *wq)
  * Deal with out-of-order and/or completions that complete
  * prior unsignalled WRs.
  */
-void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
 {
 	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
 	struct c4iw_qp *qhp;
@@ -339,6 +339,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 		if (qhp == NULL)
 			goto next_cqe;

+		if (flush_qhp != qhp) {
+			spin_lock(&qhp->lock);
+
+			if (qhp->wq.flushed == 1)
+				goto next_cqe;
+		}
+
 		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
 			goto next_cqe;

@@ -390,6 +397,8 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 next_cqe:
 		t4_hwcq_consume(&chp->cq);
 		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+		if (qhp && flush_qhp != qhp)
+			spin_unlock(&qhp->lock);
 	}
 }
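For context, a minimal sketch of the caller-side pattern the new flush_qhp parameter implies: the caller already holds the lock of the QP it is flushing and passes that QP in, so c4iw_flush_hw_cq() only locks (and skips, if already flushed) the *other* QPs whose CQEs sit on the shared CQ. The function example_flush_qp(), the CQ-before-QP lock ordering, and the exact locking primitives shown are illustrative assumptions, not the driver's actual code.

	/*
	 * Sketch only: shows the assumed caller-side locking, not the real
	 * driver routine.  The QP passed as flush_qhp is locked here, so
	 * c4iw_flush_hw_cq() must not try to take qhp->lock for it again.
	 */
	static void example_flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp)
	{
		unsigned long flag;

		spin_lock_irqsave(&rchp->lock, flag);	/* assumed: CQ lock first */
		spin_lock(&qhp->lock);			/* ... then the QP lock */

		qhp->wq.flushed = 1;			/* mark this QP as flushed */

		/* Pass the already-locked QP so the flush skips re-locking it. */
		c4iw_flush_hw_cq(rchp, qhp);

		spin_unlock(&qhp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
	}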