@@ -396,10 +396,9 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
-	unsigned long flags;
 	dma_cookie_t cookie = -EINVAL;
 
-	spin_lock_irqsave(&chan->desc_lock, flags);
+	spin_lock_bh(&chan->desc_lock);
 
 	/*
 	 * assign cookies to all of the software descriptors
@@ -412,7 +411,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	/* put this transaction onto the tail of the pending queue */
 	append_ld_queue(chan, desc);
 
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
+	spin_unlock_bh(&chan->desc_lock);
 
 	return cookie;
 }
@@ -617,13 +616,12 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
 static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	unsigned long flags;
 
 	chan_dbg(chan, "free all channel resources\n");
-	spin_lock_irqsave(&chan->desc_lock, flags);
+	spin_lock_bh(&chan->desc_lock);
 	fsldma_free_desc_list(chan, &chan->ld_pending);
 	fsldma_free_desc_list(chan, &chan->ld_running);
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
+	spin_unlock_bh(&chan->desc_lock);
 
 	dma_pool_destroy(chan->desc_pool);
 	chan->desc_pool = NULL;
@@ -842,7 +840,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 {
 	struct dma_slave_config *config;
 	struct fsldma_chan *chan;
-	unsigned long flags;
 	int size;
 
 	if (!dchan)
@@ -852,7 +849,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
-		spin_lock_irqsave(&chan->desc_lock, flags);
+		spin_lock_bh(&chan->desc_lock);
 
 		/* Halt the DMA engine */
 		dma_halt(chan);
@@ -862,7 +859,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 		fsldma_free_desc_list(chan, &chan->ld_running);
 		chan->idle = true;
 
-		spin_unlock_irqrestore(&chan->desc_lock, flags);
+		spin_unlock_bh(&chan->desc_lock);
 		return 0;
 
 	case DMA_SLAVE_CONFIG:
@@ -904,11 +901,10 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	unsigned long flags;
 
-	spin_lock_irqsave(&chan->desc_lock, flags);
+	spin_lock_bh(&chan->desc_lock);
 	fsl_chan_xfer_ld_queue(chan);
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
+	spin_unlock_bh(&chan->desc_lock);
 }
 
 /**
@@ -998,11 +994,10 @@ static void dma_do_tasklet(unsigned long data)
 	struct fsldma_chan *chan = (struct fsldma_chan *)data;
 	struct fsl_desc_sw *desc, *_desc;
 	LIST_HEAD(ld_cleanup);
-	unsigned long flags;
 
 	chan_dbg(chan, "tasklet entry\n");
 
-	spin_lock_irqsave(&chan->desc_lock, flags);
+	spin_lock_bh(&chan->desc_lock);
 
 	/* update the cookie if we have some descriptors to cleanup */
 	if (!list_empty(&chan->ld_running)) {
@@ -1031,7 +1026,7 @@ static void dma_do_tasklet(unsigned long data)
 	 * ahead and free the descriptors below.
 	 */
 	fsl_chan_xfer_ld_queue(chan);
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
+	spin_unlock_bh(&chan->desc_lock);
 
 	/* Run the callback for each descriptor, in order */
 	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {