@@ -132,25 +132,22 @@ static int defer_packet_queue(
 	struct hfi1_user_sdma_pkt_q *pq =
 		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
 	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
-	struct user_sdma_txreq *tx =
-		container_of(txreq, struct user_sdma_txreq, txreq);
 
-	if (sdma_progress(sde, seq, txreq)) {
-		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
-			goto eagain;
-	}
+	write_seqlock(&dev->iowait_lock);
+	if (sdma_progress(sde, seq, txreq))
+		goto eagain;
 	/*
 	 * We are assuming that if the list is enqueued somewhere, it
 	 * is to the dmawait list since that is the only place where
 	 * it is supposed to be enqueued.
 	 */
 	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-	write_seqlock(&dev->iowait_lock);
 	if (list_empty(&pq->busy.list))
 		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
 	write_sequnlock(&dev->iowait_lock);
 	return -EBUSY;
 eagain:
+	write_sequnlock(&dev->iowait_lock);
 	return -EAGAIN;
 }
 
@@ -803,7 +800,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 
 		tx->flags = 0;
 		tx->req = req;
-		tx->busycount = 0;
 		INIT_LIST_HEAD(&tx->list);
 
 		/*
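
For context on the first hunk: it widens the iowait_lock critical section so that the sdma_progress() check and the dmawait enqueue happen atomically with respect to the completion side, and it drops the busycount retry counter that papered over the race. Below is a minimal standalone sketch of that lock-ordering pattern, not hfi1 code; it uses pthreads, and the names (struct queue_state, defer_or_retry, complete_one) are hypothetical stand-ins for the driver's types.

	#include <errno.h>
	#include <pthread.h>
	#include <stdbool.h>

	struct queue_state {
		pthread_mutex_t lock;	/* stands in for dev->iowait_lock */
		unsigned int completed;	/* advanced by the completion side */
		bool queued;		/* stands in for pq->busy on dmawait */
	};

	/* Completion side: make progress, then wake anything queued. */
	static void complete_one(struct queue_state *q)
	{
		pthread_mutex_lock(&q->lock);
		q->completed++;
		if (q->queued) {
			q->queued = false;
			/* real code would requeue/wake the deferred work here */
		}
		pthread_mutex_unlock(&q->lock);
	}

	/*
	 * Submission side: return -EAGAIN if progress already happened
	 * (caller retries immediately), or -EBUSY after parking on the
	 * wait list.  Taking the lock *before* the progress check is the
	 * point: with the old order (check, then lock, then enqueue), a
	 * completion landing between the check and the enqueue would see
	 * nothing queued and wake no one, leaving the request stuck.
	 */
	static int defer_or_retry(struct queue_state *q, unsigned int seen)
	{
		int ret = -EBUSY;

		pthread_mutex_lock(&q->lock);
		if (q->completed != seen) {
			ret = -EAGAIN;	/* progress was made; retry now */
			goto out;
		}
		q->queued = true;	/* park until complete_one() runs */
	out:
		pthread_mutex_unlock(&q->lock);
		return ret;
	}

With both paths serialized on the same lock, the deferred request can no longer miss its wakeup, which is why the patch can also delete the MAX_DEFER_RETRY_COUNT bailout and the per-txreq busycount initialization in the second hunk.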