@@ -802,9 +802,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
 }
 
 static void usbhsf_dma_complete(void *arg);
-static void xfer_work(struct work_struct *work)
+static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
 {
-	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
 	struct usbhs_pipe *pipe = pkt->pipe;
 	struct usbhs_fifo *fifo;
 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
@@ -812,12 +811,10 @@ static void xfer_work(struct work_struct *work)
 	struct dma_chan *chan;
 	struct device *dev = usbhs_priv_to_dev(priv);
 	enum dma_transfer_direction dir;
-	unsigned long flags;
 
-	usbhs_lock(priv, flags);
 	fifo = usbhs_pipe_to_fifo(pipe);
 	if (!fifo)
-		goto xfer_work_end;
+		return;
 
 	chan = usbhsf_dma_chan_get(fifo, pkt);
 	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
@@ -826,7 +823,7 @@ static void xfer_work(struct work_struct *work)
 					pkt->trans, dir,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc)
-		goto xfer_work_end;
+		return;
 
 	desc->callback		= usbhsf_dma_complete;
 	desc->callback_param	= pipe;
@@ -834,7 +831,7 @@ static void xfer_work(struct work_struct *work)
 	pkt->cookie = dmaengine_submit(desc);
 	if (pkt->cookie < 0) {
 		dev_err(dev, "Failed to submit dma descriptor\n");
-		goto xfer_work_end;
+		return;
 	}
 
 	dev_dbg(dev, "  %s %d (%d/ %d)\n",
@@ -845,8 +842,17 @@ static void xfer_work(struct work_struct *work)
 	dma_async_issue_pending(chan);
 	usbhsf_dma_start(pipe, fifo);
 	usbhs_pipe_enable(pipe);
+}
+
+static void xfer_work(struct work_struct *work)
+{
+	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
+	struct usbhs_pipe *pipe = pkt->pipe;
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+	unsigned long flags;
 
-xfer_work_end:
+	usbhs_lock(priv, flags);
+	usbhsf_dma_xfer_preparing(pkt);
 	usbhs_unlock(priv, flags);
 }
 
@@ -899,8 +905,13 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
 	pkt->trans = len;
 
 	usbhsf_tx_irq_ctrl(pipe, 0);
-	INIT_WORK(&pkt->work, xfer_work);
-	schedule_work(&pkt->work);
+	/* FIXME: Workaround for usb dmac, whose driver can be used in atomic context */
+	if (usbhs_get_dparam(priv, has_usb_dmac)) {
+		usbhsf_dma_xfer_preparing(pkt);
+	} else {
+		INIT_WORK(&pkt->work, xfer_work);
+		schedule_work(&pkt->work);
+	}
 
 	return 0;
 
@@ -1006,8 +1017,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
 
 	pkt->trans = pkt->length;
 
-	INIT_WORK(&pkt->work, xfer_work);
-	schedule_work(&pkt->work);
+	usbhsf_dma_xfer_preparing(pkt);
 
 	return 0;
 
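Below is a standalone, illustrative sketch (plain C with pthreads, not kernel code) of the dispatch pattern the usbhsf_dma_prepare_push() hunk above introduces: when the controller is flagged has_usb_dmac, the DMA transfer is prepared immediately instead of being deferred to a workqueue, while other configurations keep the old xfer_work path. All names in the sketch (fake_pkt, prepare_xfer, queue_xfer_work, fake_lock) are hypothetical stand-ins and not part of the renesas_usbhs driver; the mutex only loosely models the serialisation that usbhs_lock() provides.

/*
 * Hypothetical stand-alone model of the new if/else in
 * usbhsf_dma_prepare_push(); build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_pkt {
	int trans;		/* bytes to transfer */
	bool has_usb_dmac;	/* stands in for usbhs_get_dparam(priv, has_usb_dmac) */
};

/* Loosely models the serialisation provided by usbhs_lock()/usbhs_unlock(). */
static pthread_mutex_t fake_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for usbhsf_dma_xfer_preparing(): prepare the transfer right away. */
static void prepare_xfer(struct fake_pkt *pkt)
{
	printf("preparing DMA transfer of %d bytes immediately\n", pkt->trans);
}

/* Stands in for INIT_WORK()/schedule_work() on the legacy deferred path. */
static void queue_xfer_work(struct fake_pkt *pkt)
{
	printf("deferring %d-byte transfer to a work item\n", pkt->trans);
}

/* Mirrors the dispatch added to usbhsf_dma_prepare_push(). */
static void dma_prepare_push(struct fake_pkt *pkt)
{
	pthread_mutex_lock(&fake_lock);
	if (pkt->has_usb_dmac)
		prepare_xfer(pkt);	/* usb-dmac path: no workqueue round trip */
	else
		queue_xfer_work(pkt);	/* previous behaviour for other DMA engines */
	pthread_mutex_unlock(&fake_lock);
}

int main(void)
{
	struct fake_pkt a = { .trans = 512, .has_usb_dmac = true };
	struct fake_pkt b = { .trans = 512, .has_usb_dmac = false };

	dma_prepare_push(&a);
	dma_prepare_push(&b);
	return 0;
}

The effect of the real hunk is simply that, on has_usb_dmac hardware, usbhsf_dma_xfer_preparing() runs synchronously rather than bouncing through schedule_work(), while the else branch preserves the previous behaviour for other DMA setups.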