@@ -169,19 +169,28 @@ int vnic_rq_disable(struct vnic_rq *rq)
 {
 	unsigned int wait;
 	struct vnic_dev *vdev = rq->vdev;
+	int i;
 
-	iowrite32(0, &rq->ctrl->enable);
+	/* Due to a race condition with clearing RQ "mini-cache" in hw, we need
+	 * to disable the RQ twice to guarantee that stale descriptors are not
+	 * used when this RQ is re-enabled.
+	 */
+	for (i = 0; i < 2; i++) {
+		iowrite32(0, &rq->ctrl->enable);
 
-	/* Wait for HW to ACK disable request */
-	for (wait = 0; wait < 1000; wait++) {
-		if (!(ioread32(&rq->ctrl->running)))
-			return 0;
-		udelay(10);
-	}
+		/* Wait for HW to ACK disable request */
+		for (wait = 20000; wait > 0; wait--)
+			if (!ioread32(&rq->ctrl->running))
+				break;
+		if (!wait) {
+			vdev_neterr(vdev, "Failed to disable RQ[%d]\n",
+				    rq->index);
 
-	vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index);
+			return -ETIMEDOUT;
+		}
+	}
 
-	return -ETIMEDOUT;
+	return 0;
 }
 
 void vnic_rq_clean(struct vnic_rq *rq,
@@ -212,6 +221,11 @@ void vnic_rq_clean(struct vnic_rq *rq,
 		[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
 	iowrite32(fetch_index, &rq->ctrl->posted_index);
 
+	/* Anytime we write fetch_index, we need to re-write 0 to rq->enable
+	 * to re-sync internal VIC state.
+	 */
+	iowrite32(0, &rq->ctrl->enable);
+
 	vnic_dev_clear_desc_ring(&rq->ring);
 }
 