@@ -810,20 +810,27 @@ static void xfer_work(struct work_struct *work)
 {
 	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
 	struct usbhs_pipe *pipe = pkt->pipe;
-	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
+	struct usbhs_fifo *fifo;
 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
 	struct dma_async_tx_descriptor *desc;
-	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
+	struct dma_chan *chan;
 	struct device *dev = usbhs_priv_to_dev(priv);
 	enum dma_transfer_direction dir;
+	unsigned long flags;
 
+	usbhs_lock(priv, flags);
+	fifo = usbhs_pipe_to_fifo(pipe);
+	if (!fifo)
+		goto xfer_work_end;
+
+	chan = usbhsf_dma_chan_get(fifo, pkt);
 	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 
 	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
 					   pkt->trans, dir,
 					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc)
-		return;
+		goto xfer_work_end;
 
 	desc->callback		= usbhsf_dma_complete;
 	desc->callback_param	= pipe;
@@ -831,7 +838,7 @@ static void xfer_work(struct work_struct *work)
 	pkt->cookie = dmaengine_submit(desc);
 	if (pkt->cookie < 0) {
 		dev_err(dev, "Failed to submit dma descriptor\n");
-		return;
+		goto xfer_work_end;
 	}
 
 	dev_dbg(dev, "  %s %d (%d/ %d)\n",
@@ -842,6 +849,9 @@ static void xfer_work(struct work_struct *work)
 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
 	dma_async_issue_pending(chan);
 	usbhs_pipe_enable(pipe);
+
+xfer_work_end:
+	usbhs_unlock(priv, flags);
 }
 
 /*
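For context: usbhs_lock()/usbhs_unlock() in this driver are thin wrappers around spin_lock_irqsave()/spin_unlock_irqrestore() on the controller's private lock, so the hunk above closes a race in the deferred work item. Previously the fifo pointer (and the DMA channel derived from it) was fetched at declaration time, before any locking; if the pipe's FIFO had been unselected in the meantime, usbhs_pipe_to_fifo() could yield NULL and usbhsf_dma_chan_get(fifo, pkt) would dereference it. The sketch below shows the same lock / re-check / single-unlock-label shape in self-contained user-space C, with a pthread mutex standing in for the spin lock; every name in it is illustrative, not taken from the driver.

/*
 * Minimal sketch of the pattern in the patch: take the lock, re-read a
 * pointer that another path may have cleared, and leave through one
 * unlock label on every early-exit path.  Build with: cc -pthread.
 */
#include <pthread.h>
#include <stdio.h>

struct fifo {
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct fifo *pipe_fifo;	/* may be cleared by a teardown path */

static void xfer_work_sketch(void)
{
	struct fifo *fifo;

	pthread_mutex_lock(&lock);

	/* Look the pointer up under the lock instead of at declaration
	 * time, and give up cleanly if teardown already ran. */
	fifo = pipe_fifo;
	if (!fifo)
		goto xfer_work_end;

	printf("transferring on fifo %d\n", fifo->id);

xfer_work_end:
	/* Single unlock site, reached on success and on every bail-out. */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct fifo f = { .id = 1 };

	pipe_fifo = &f;
	xfer_work_sketch();	/* uses the fifo */

	pipe_fifo = NULL;	/* simulate teardown, e.g. cable disconnect */
	xfer_work_sketch();	/* now a safe no-op instead of a crash */
	return 0;
}

Replacing the bare "return" statements with "goto xfer_work_end" is what makes the single unlock site work: every path out of the function, success or failure, releases the lock exactly once, which is the usual kernel idiom for functions that acquire a lock early.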