@@ -187,7 +187,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
         pq->ctxt = uctxt->ctxt;
         pq->subctxt = fd->subctxt;
         pq->n_max_reqs = hfi1_sdma_comp_ring_size;
-        pq->state = SDMA_PKT_Q_INACTIVE;
         atomic_set(&pq->n_reqs, 0);
         init_waitqueue_head(&pq->wait);
         atomic_set(&pq->n_locked, 0);
@@ -276,7 +275,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
                 /* Wait until all requests have been freed. */
                 wait_event_interruptible(
                         pq->wait,
-                        (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+                        !atomic_read(&pq->n_reqs));
                 kfree(pq->reqs);
                 kfree(pq->req_in_use);
                 kmem_cache_destroy(pq->txreq_cache);
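
After this change, request lifetime is tracked only by the pq->n_reqs counter: the submit path raises it, the completion path drops it and wakes any waiter (see the pq_update() hunk further down), and teardown simply waits for it to reach zero. Below is a minimal sketch of that pairing using the same kernel primitives; struct pkt_q and the pkt_q_* helper names are illustrative stand-ins, not driver symbols.

#include <linux/atomic.h>
#include <linux/wait.h>

/* Illustrative stand-in for the counting fields of hfi1_user_sdma_pkt_q. */
struct pkt_q {
        int state;              /* flow control only; teardown no longer reads it */
        atomic_t n_reqs;        /* in-flight user requests */
        wait_queue_head_t wait; /* teardown sleeps here */
};

static void pkt_q_init(struct pkt_q *q)
{
        atomic_set(&q->n_reqs, 0);
        init_waitqueue_head(&q->wait);
}

/* Submit side: account for the request before any packet can complete. */
static void pkt_q_get(struct pkt_q *q)
{
        atomic_inc(&q->n_reqs);
}

/* Completion side: the last request out wakes the waiter, as pq_update()
 * does after this patch. */
static void pkt_q_put(struct pkt_q *q)
{
        if (atomic_dec_and_test(&q->n_reqs))
                wake_up(&q->wait);
}

/* Teardown: the wait condition is the counter itself, so it cannot drift
 * out of step with a separately maintained state flag. */
static int pkt_q_drain(struct pkt_q *q)
{
        return wait_event_interruptible(q->wait, !atomic_read(&q->n_reqs));
}

With the counter as the only teardown condition, there is no window in which state and n_reqs can disagree.
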
@@ -312,6 +311,13 @@ static u8 dlid_to_selector(u16 dlid)
         return mapping[hash];
 }
 
+/**
+ * hfi1_user_sdma_process_request() - Process and start a user sdma request
+ * @fd: valid file descriptor
+ * @iovec: array of io vectors to process
+ * @dim: overall iovec array size
+ * @count: number of io vector array entries processed
+ */
 int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
                                    struct iovec *iovec, unsigned long dim,
                                    unsigned long *count)
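
The comment added above follows the kernel-doc convention, so scripts/kernel-doc and the documentation build can extract it. For reference, the general shape of such a comment (a generic template, not specific to this driver):

/**
 * function_name() - Short one-line description.
 * @arg1: Description of the first parameter.
 * @arg2: Description of the second parameter.
 *
 * Optional longer description.
 *
 * Return: Meaning of the return value, if any.
 */
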
@@ -560,20 +566,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
                 req->ahg_idx = sdma_ahg_alloc(req->sde);
 
         set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
+        pq->state = SDMA_PKT_Q_ACTIVE;
         /* Send the first N packets in the request to buy us some time */
         ret = user_sdma_send_pkts(req, pcount);
         if (unlikely(ret < 0 && ret != -EBUSY))
                 goto free_req;
 
-        /*
-         * It is possible that the SDMA engine would have processed all the
-         * submitted packets by the time we get here. Therefore, only set
-         * packet queue state to ACTIVE if there are still uncompleted
-         * requests.
-         */
-        if (atomic_read(&pq->n_reqs))
-                xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
-
         /*
          * This is a somewhat blocking send implementation.
          * The driver will block the caller until all packets of the
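
The reordering above is the core of the fix. As the hunks here suggest, the old code checked n_reqs and set the state in two separate steps, so a completion interrupt could slip in between: it retires the last packet, drops n_reqs to zero, and flips the queue to SDMA_PKT_Q_INACTIVE, after which the xchg() re-marks the queue ACTIVE with nothing left in flight to ever clear it, and hfi1_user_sdma_free_queues() (old wait condition above) sleeps forever. Setting ACTIVE once, before the first packet is posted, closes that window. A minimal sketch of the corrected ordering, reusing the illustrative pkt_q from the earlier sketch; PKT_Q_ACTIVE and send_first_pkts() are hypothetical stand-ins for SDMA_PKT_Q_ACTIVE and user_sdma_send_pkts():

enum { PKT_Q_ACTIVE = 1 };      /* stand-in for SDMA_PKT_Q_ACTIVE */

/* Stand-in for user_sdma_send_pkts(); posts up to n packets. */
static int send_first_pkts(struct pkt_q *q, unsigned int n);

static int pkt_q_submit(struct pkt_q *q, unsigned int pcount)
{
        int ret;

        pkt_q_get(q);                   /* n_reqs++ happens first */
        q->state = PKT_Q_ACTIVE;        /* published before any packet is on
                                         * the ring, so a completion can never
                                         * observe a stale state */
        ret = send_first_pkts(q, pcount);
        if (ret < 0 && ret != -EBUSY)
                return ret;             /* caller unwinds and drops n_reqs */
        return 0;
}
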
@@ -1409,10 +1407,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
 
 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
 {
-        if (atomic_dec_and_test(&pq->n_reqs)) {
-                xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
+        if (atomic_dec_and_test(&pq->n_reqs))
                 wake_up(&pq->wait);
-        }
 }
 
 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)