|
@@ -637,7 +637,7 @@ repoll:
|
|
|
}
|
|
|
|
|
|
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
|
|
|
- struct ib_wc *wc)
|
|
|
+ struct ib_wc *wc, bool is_fatal_err)
|
|
|
{
|
|
|
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
|
|
|
struct mlx5_ib_wc *soft_wc, *next;
|
|
@@ -650,6 +650,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
|
|
|
mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
|
|
|
cq->mcq.cqn);
|
|
|
|
|
|
+ if (unlikely(is_fatal_err)) {
|
|
|
+ soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
|
|
|
+ soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
|
|
|
+ }
|
|
|
wc[npolled++] = soft_wc->wc;
|
|
|
list_del(&soft_wc->list);
|
|
|
kfree(soft_wc);
|
|
@@ -670,12 +674,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
|
|
|
|
|
|
spin_lock_irqsave(&cq->lock, flags);
|
|
|
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
|
|
|
- mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
|
|
|
+ /* make sure no soft WQEs are waiting */
|
|
|
+ if (unlikely(!list_empty(&cq->wc_list)))
|
|
|
+ soft_polled = poll_soft_wc(cq, num_entries, wc, true);
|
|
|
+
|
|
|
+ mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
|
|
|
+ wc + soft_polled, &npolled);
|
|
|
goto out;
|
|
|
}
|
|
|
|
|
|
if (unlikely(!list_empty(&cq->wc_list)))
|
|
|
- soft_polled = poll_soft_wc(cq, num_entries, wc);
|
|
|
+ soft_polled = poll_soft_wc(cq, num_entries, wc, false);
|
|
|
|
|
|
for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
|
|
|
if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
|