
Merge branch 'topic/dmaengine_cleanups' into for-linus

Vinod Koul 9 years ago
parent
commit
4bb0439626

+ 26 - 4
drivers/dma/coh901318.c

@@ -266,7 +266,7 @@ static int dma_memcpy_channels[] = {
 			COH901318_CX_CTRL_DDMA_LEGACY | \
 			COH901318_CX_CTRL_PRDD_SOURCE)
 
-const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
+static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
 	{
 		.number = U300_DMA_MSL_TX_0,
 		.name = "MSL TX 0",
@@ -1280,6 +1280,7 @@ struct coh901318_desc {
 struct coh901318_base {
 	struct device *dev;
 	void __iomem *virtbase;
+	unsigned int irq;
 	struct coh901318_pool pool;
 	struct powersave pm;
 	struct dma_device dma_slave;
@@ -1364,7 +1365,6 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
 }
 
 static const struct file_operations coh901318_debugfs_status_operations = {
-	.owner		= THIS_MODULE,
 	.open		= simple_open,
 	.read		= coh901318_debugfs_read,
 	.llseek		= default_llseek,
@@ -2680,6 +2680,8 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	if (err)
 		return err;
 
+	base->irq = irq;
+
 	err = coh901318_pool_create(&base->pool, &pdev->dev,
 				    sizeof(struct coh901318_lli),
 				    32);
@@ -2755,11 +2757,31 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	coh901318_pool_destroy(&base->pool);
 	return err;
 }
+static void coh901318_base_remove(struct coh901318_base *base, const int *pick_chans)
+{
+	int chans_i;
+	int i = 0;
+	struct coh901318_chan *cohc;
+
+	for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
+		for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
+			cohc = &base->chans[i];
+
+			tasklet_kill(&cohc->tasklet);
+		}
+	}
+
+}
 
 static int coh901318_remove(struct platform_device *pdev)
 {
 	struct coh901318_base *base = platform_get_drvdata(pdev);
 
+	devm_free_irq(&pdev->dev, base->irq, base);
+
+	coh901318_base_remove(base, dma_slave_channels);
+	coh901318_base_remove(base, dma_memcpy_channels);
+
 	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&base->dma_memcpy);
 	dma_async_device_unregister(&base->dma_slave);
@@ -2780,13 +2802,13 @@ static struct platform_driver coh901318_driver = {
 	},
 };
 
-int __init coh901318_init(void)
+static int __init coh901318_init(void)
 {
 	return platform_driver_probe(&coh901318_driver, coh901318_probe);
 }
 subsys_initcall(coh901318_init);
 
-void __exit coh901318_exit(void)
+static void __exit coh901318_exit(void)
 {
 	platform_driver_unregister(&coh901318_driver);
 }
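
This coh901318 change shows the pattern most of the diffs below repeat: remember the requested IRQ in probe, then release it and kill any tasklets in remove before the dmaengine device is unregistered. A minimal sketch of that ordering, with hypothetical foo_* names that are not part of this commit:

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct foo_dev {
	unsigned int irq;		/* saved by probe after devm_request_irq() */
	struct tasklet_struct tasklet;
	struct dma_device ddev;
};

static int foo_remove(struct platform_device *pdev)
{
	struct foo_dev *fd = platform_get_drvdata(pdev);

	/* Stop the interrupt source first so no new tasklets get scheduled... */
	devm_free_irq(&pdev->dev, fd->irq, fd);
	/* ...then wait for any already-scheduled tasklet to finish. */
	tasklet_kill(&fd->tasklet);

	/* Only now is it safe to tear down the dmaengine registration. */
	dma_async_device_unregister(&fd->ddev);
	return 0;
}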

+ 0 - 3
drivers/dma/cppi41.c

@@ -497,16 +497,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
 	struct cppi41_desc *d;
 	struct scatterlist *sg;
 	unsigned int i;
-	unsigned int num;
 
-	num = 0;
 	d = c->desc;
 	for_each_sg(sgl, sg, sg_len, i) {
 		u32 addr;
 		u32 len;
 
 		/* We need to use more than one desc once musb supports sg */
-		BUG_ON(num > 0);
 		addr = lower_32_bits(sg_dma_address(sg));
 		len = sg_dma_len(sg);
 

+ 14 - 0
drivers/dma/dma-jz4740.c

@@ -573,12 +573,26 @@ err_unregister:
 	return ret;
 }
 
+static void jz4740_cleanup_vchan(struct dma_device *dmadev)
+{
+	struct jz4740_dmaengine_chan *chan, *_chan;
+
+	list_for_each_entry_safe(chan, _chan,
+				&dmadev->channels, vchan.chan.device_node) {
+		list_del(&chan->vchan.chan.device_node);
+		tasklet_kill(&chan->vchan.task);
+	}
+}
+
+
 static int jz4740_dma_remove(struct platform_device *pdev)
 {
 	struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
 	int irq = platform_get_irq(pdev, 0);
 
 	free_irq(irq, dmadev);
+
+	jz4740_cleanup_vchan(&dmadev->ddev);
 	dma_async_device_unregister(&dmadev->ddev);
 	clk_disable_unprepare(dmadev->clk);
 

+ 21 - 0
drivers/dma/edma.c

@@ -239,6 +239,9 @@ struct edma_cc {
 	bool				chmap_exist;
 	enum dma_event_q		default_queue;
 
+	unsigned int			ccint;
+	unsigned int			ccerrint;
+
 	/*
 	 * The slot_inuse bit for each PaRAM slot is clear unless the slot is
 	 * in use by Linux or if it is allocated to be used by DSP.
@@ -2283,6 +2286,7 @@ static int edma_probe(struct platform_device *pdev)
 			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
 			return ret;
 		}
+		ecc->ccint = irq;
 	}
 
 	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
@@ -2298,6 +2302,7 @@ static int edma_probe(struct platform_device *pdev)
 			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
 			return ret;
 		}
+		ecc->ccerrint = irq;
 	}
 
 	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
@@ -2388,11 +2393,27 @@ err_reg1:
 	return ret;
 }
 
+static void edma_cleanupp_vchan(struct dma_device *dmadev)
+{
+	struct edma_chan *echan, *_echan;
+
+	list_for_each_entry_safe(echan, _echan,
+			&dmadev->channels, vchan.chan.device_node) {
+		list_del(&echan->vchan.chan.device_node);
+		tasklet_kill(&echan->vchan.task);
+	}
+}
+
 static int edma_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct edma_cc *ecc = dev_get_drvdata(dev);
 
+	devm_free_irq(dev, ecc->ccint, ecc);
+	devm_free_irq(dev, ecc->ccerrint, ecc);
+
+	edma_cleanupp_vchan(&ecc->dma_slave);
+
 	if (dev->of_node)
 		of_dma_controller_free(dev->of_node);
 	dma_async_device_unregister(&ecc->dma_slave);

+ 24 - 0
drivers/dma/fsl-edma.c

@@ -852,6 +852,17 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
 	return 0;
 }
 
+static void fsl_edma_irq_exit(
+		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+	if (fsl_edma->txirq == fsl_edma->errirq) {
+		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+	} else {
+		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+		devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
+	}
+}
+
 static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
 {
 	int i;
@@ -984,11 +995,24 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	return 0;
 }
 
+static void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
+{
+	struct fsl_edma_chan *chan, *_chan;
+
+	list_for_each_entry_safe(chan, _chan,
+				&dmadev->channels, vchan.chan.device_node) {
+		list_del(&chan->vchan.chan.device_node);
+		tasklet_kill(&chan->vchan.task);
+	}
+}
+
 static int fsl_edma_remove(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
 
+	fsl_edma_irq_exit(pdev, fsl_edma);
+	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
 	of_dma_controller_free(np);
 	dma_async_device_unregister(&fsl_edma->dma_dev);
 	fsl_disable_clocks(fsl_edma);
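
The jz4740, edma, and fsl-edma diffs above each add the same helper: walk the dma_device channel list, unlink every virtual channel, and kill its tasklet so nothing can fire after the device is unregistered. A standalone sketch of the idiom, assuming a hypothetical foo_chan that embeds a virt_dma_chan:

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>

#include "virt-dma.h"

struct foo_chan {
	struct virt_dma_chan vchan;	/* vchan.chan is the dmaengine channel */
};

static void foo_cleanup_vchan(struct dma_device *dmadev)
{
	struct foo_chan *chan, *_chan;

	/* _safe variant: each channel is deleted from the list as we go. */
	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}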

+ 5 - 4
drivers/dma/fsl_raid.c

@@ -337,7 +337,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq(
 
 	re_chan = container_of(chan, struct fsl_re_chan, chan);
 	if (len > FSL_RE_MAX_DATA_LEN) {
-		dev_err(re_chan->dev, "genq tx length %lu, max length %d\n",
+		dev_err(re_chan->dev, "genq tx length %zu, max length %d\n",
 			len, FSL_RE_MAX_DATA_LEN);
 		return NULL;
 	}
@@ -424,7 +424,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq(
 
 	re_chan = container_of(chan, struct fsl_re_chan, chan);
 	if (len > FSL_RE_MAX_DATA_LEN) {
-		dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n",
+		dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n",
 			len, FSL_RE_MAX_DATA_LEN);
 		return NULL;
 	}
@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy(
 	re_chan = container_of(chan, struct fsl_re_chan, chan);
 
 	if (len > FSL_RE_MAX_DATA_LEN) {
-		dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n",
+		dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n",
 			len, FSL_RE_MAX_DATA_LEN);
 		return NULL;
 	}
@@ -856,6 +856,8 @@ static int fsl_re_probe(struct platform_device *ofdev)
 
 static void fsl_re_remove_chan(struct fsl_re_chan *chan)
 {
+	tasklet_kill(&chan->irqtask);
+
 	dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
 		      chan->inb_phys_addr);
 
@@ -890,7 +892,6 @@ static struct of_device_id fsl_re_ids[] = {
 static struct platform_driver fsl_re_driver = {
 	.driver = {
 		.name = "fsl-raideng",
-		.owner = THIS_MODULE,
 		.of_match_table = fsl_re_ids,
 	},
 	.probe = fsl_re_probe,
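
The three fsl_raid hunks above only change a printk format: the length argument is a size_t, which should be printed with %zu rather than %lu. A minimal sketch of the corrected idiom (the helper name is hypothetical):

#include <linux/device.h>
#include <linux/types.h>

static void foo_report_bad_len(struct device *dev, size_t len, int max)
{
	/* %zu is the portable conversion for size_t; %lu only matches its width on some architectures. */
	dev_err(dev, "tx length is %zu, max length is %d\n", len, max);
}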

+ 30 - 1
drivers/dma/imx-dma.c

@@ -167,6 +167,7 @@ struct imxdma_channel {
 	u32				ccr_to_device;
 	bool				enabled_2d;
 	int				slot_2d;
+	unsigned int			irq;
 };
 
 enum imx_dma_type {
@@ -186,6 +187,9 @@ struct imxdma_engine {
 	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
 	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
 	enum imx_dma_type		devtype;
+	unsigned int			irq;
+	unsigned int			irq_err;
+
 };
 
 struct imxdma_filter_data {
@@ -1048,7 +1052,7 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
 }
 
 static int __init imxdma_probe(struct platform_device *pdev)
-	{
+{
 	struct imxdma_engine *imxdma;
 	struct resource *res;
 	const struct of_device_id *of_id;
@@ -1100,6 +1104,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
 			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
 			goto disable_dma_ahb_clk;
 		}
+		imxdma->irq = irq;
 
 		irq_err = platform_get_irq(pdev, 1);
 		if (irq_err < 0) {
@@ -1113,6 +1118,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
 			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
 			goto disable_dma_ahb_clk;
 		}
+		imxdma->irq_err = irq_err;
 	}
 
 	/* enable DMA module */
@@ -1150,6 +1156,8 @@ static int __init imxdma_probe(struct platform_device *pdev)
 					 irq + i, i);
 				goto disable_dma_ahb_clk;
 			}
+
+			imxdmac->irq = irq + i;
 			init_timer(&imxdmac->watchdog);
 			imxdmac->watchdog.function = &imxdma_watchdog;
 			imxdmac->watchdog.data = (unsigned long)imxdmac;
@@ -1217,10 +1225,31 @@ disable_dma_ipg_clk:
 	return ret;
 }
 
+static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
+{
+	int i;
+
+	if (is_imx1_dma(imxdma)) {
+		disable_irq(imxdma->irq);
+		disable_irq(imxdma->irq_err);
+	}
+
+	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+		struct imxdma_channel *imxdmac = &imxdma->channel[i];
+
+		if (!is_imx1_dma(imxdma))
+			disable_irq(imxdmac->irq);
+
+		tasklet_kill(&imxdmac->dma_tasklet);
+	}
+}
+
 static int imxdma_remove(struct platform_device *pdev)
 {
 	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
 
+	imxdma_free_irq(pdev, imxdma);
+
         dma_async_device_unregister(&imxdma->dma_device);
 
 	if (pdev->dev.of_node)

+ 5 - 4
drivers/dma/imx-sdma.c

@@ -386,6 +386,7 @@ struct sdma_engine {
 	const struct sdma_driver_data	*drvdata;
 	u32				spba_start_addr;
 	u32				spba_end_addr;
+	unsigned int			irq;
 };
 
 static struct sdma_driver_data sdma_imx31 = {
@@ -751,7 +752,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	 * These are needed once we start to support transfers between
 	 * two peripherals or memory-to-memory transfers
 	 */
-	int per_2_per = 0, emi_2_emi = 0;
+	int per_2_per = 0;
 
 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
@@ -759,7 +760,6 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
-		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 		break;
 	case IMX_DMATYPE_DSP:
 		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
@@ -992,8 +992,6 @@ static int sdma_config_channel(struct dma_chan *chan)
 		} else
 			__set_bit(sdmac->event_id0, sdmac->event_mask);
 
-		/* Watermark Level */
-		sdmac->watermark_level |= sdmac->watermark_level;
 		/* Address */
 		sdmac->shp_addr = sdmac->per_address;
 		sdmac->per_addr = sdmac->per_address2;
@@ -1708,6 +1706,8 @@ static int sdma_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	sdma->irq = irq;
+
 	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
 	if (!sdma->script_addrs)
 		return -ENOMEM;
@@ -1833,6 +1833,7 @@ static int sdma_remove(struct platform_device *pdev)
 	struct sdma_engine *sdma = platform_get_drvdata(pdev);
 	int i;
 
+	devm_free_irq(&pdev->dev, sdma->irq, sdma);
 	dma_async_device_unregister(&sdma->dma_device);
 	kfree(sdma->script_addrs);
 	/* Kill the tasklet */

+ 1 - 1
drivers/dma/ioat/init.c

@@ -1212,7 +1212,7 @@ static void ioat_shutdown(struct pci_dev *pdev)
 	ioat_disable_interrupts(ioat_dma);
 }
 
-void ioat_resume(struct ioatdma_device *ioat_dma)
+static void ioat_resume(struct ioatdma_device *ioat_dma)
 {
 	struct ioatdma_chan *ioat_chan;
 	u32 chanerr;

+ 5 - 0
drivers/dma/k3dma.c

@@ -102,6 +102,7 @@ struct k3_dma_dev {
 	struct clk		*clk;
 	u32			dma_channels;
 	u32			dma_requests;
+	unsigned int		irq;
 };
 
 #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
@@ -703,6 +704,8 @@ static int k3_dma_probe(struct platform_device *op)
 	if (ret)
 		return ret;
 
+	d->irq = irq;
+
 	/* init phy channel */
 	d->phy = devm_kzalloc(&op->dev,
 		d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
@@ -787,6 +790,8 @@ static int k3_dma_remove(struct platform_device *op)
 	dma_async_device_unregister(&d->slave);
 	of_dma_controller_free((&op->dev)->of_node);
 
+	devm_free_irq(&op->dev, d->irq, d);
+
 	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
 		list_del(&c->vc.chan.device_node);
 		tasklet_kill(&c->vc.task);

+ 19 - 0
drivers/dma/mmp_pdma.c

@@ -931,6 +931,25 @@ static void dma_do_tasklet(unsigned long data)
 static int mmp_pdma_remove(struct platform_device *op)
 {
 	struct mmp_pdma_device *pdev = platform_get_drvdata(op);
+	struct mmp_pdma_phy *phy;
+	int i, irq = 0, irq_num = 0;
+
+
+	for (i = 0; i < pdev->dma_channels; i++) {
+		if (platform_get_irq(op, i) > 0)
+			irq_num++;
+	}
+
+	if (irq_num != pdev->dma_channels) {
+		irq = platform_get_irq(op, 0);
+		devm_free_irq(&op->dev, irq, pdev);
+	} else {
+		for (i = 0; i < pdev->dma_channels; i++) {
+			phy = &pdev->phy[i];
+			irq = platform_get_irq(op, i);
+			devm_free_irq(&op->dev, irq, phy);
+		}
+	}
 
 	dma_async_device_unregister(&pdev->device);
 	return 0;

+ 2 - 2
drivers/dma/mmp_tdma.c

@@ -404,7 +404,7 @@ static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
 	return;
 }
 
-struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
+static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
 {
 	struct gen_pool *gpool;
 	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
@@ -592,7 +592,7 @@ static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
 	return true;
 }
 
-struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
+static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
 			       struct of_dma *ofdma)
 {
 	struct mmp_tdma_device *tdev = ofdma->of_dma_data;

+ 4 - 0
drivers/dma/moxart-dma.c

@@ -148,6 +148,7 @@ struct moxart_chan {
 struct moxart_dmadev {
 	struct dma_device		dma_slave;
 	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+	unsigned int			irq;
 };
 
 struct moxart_filter_data {
@@ -615,6 +616,7 @@ static int moxart_probe(struct platform_device *pdev)
 		dev_err(dev, "devm_request_irq failed\n");
 		return ret;
 	}
+	mdc->irq = irq;
 
 	ret = dma_async_device_register(&mdc->dma_slave);
 	if (ret) {
@@ -638,6 +640,8 @@ static int moxart_remove(struct platform_device *pdev)
 {
 	struct moxart_dmadev *m = platform_get_drvdata(pdev);
 
+	devm_free_irq(&pdev->dev, m->irq, m);
+
 	dma_async_device_unregister(&m->dma_slave);
 
 	if (pdev->dev.of_node)

+ 1 - 0
drivers/dma/mpc512x_dma.c

@@ -1110,6 +1110,7 @@ static int mpc_dma_remove(struct platform_device *op)
 	}
 	free_irq(mdma->irq, mdma);
 	irq_dispose_mapping(mdma->irq);
+	tasklet_kill(&mdma->tasklet);
 
 	return 0;
 }

+ 13 - 0
drivers/dma/nbpfaxi.c

@@ -227,6 +227,7 @@ struct nbpf_device {
 	void __iomem *base;
 	struct clk *clk;
 	const struct nbpf_config *config;
+	unsigned int eirq;
 	struct nbpf_channel chan[];
 };
 
@@ -1375,6 +1376,7 @@ static int nbpf_probe(struct platform_device *pdev)
 			       IRQF_SHARED, "dma error", nbpf);
 	if (ret < 0)
 		return ret;
+	nbpf->eirq = eirq;
 
 	INIT_LIST_HEAD(&dma_dev->channels);
 
@@ -1446,6 +1448,17 @@ e_clk_off:
 static int nbpf_remove(struct platform_device *pdev)
 {
 	struct nbpf_device *nbpf = platform_get_drvdata(pdev);
+	int i;
+
+	devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);
+
+	for (i = 0; i < nbpf->config->num_channels; i++) {
+		struct nbpf_channel *chan = nbpf->chan + i;
+
+		devm_free_irq(&pdev->dev, chan->irq, chan);
+
+		tasklet_kill(&chan->tasklet);
+	}
 
 	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&nbpf->dma_dev);

+ 4 - 0
drivers/dma/omap-dma.c

@@ -1292,10 +1292,14 @@ static int omap_dma_probe(struct platform_device *pdev)
 static int omap_dma_remove(struct platform_device *pdev)
 {
 	struct omap_dmadev *od = platform_get_drvdata(pdev);
+	int irq;
 
 	if (pdev->dev.of_node)
 		of_dma_controller_free(pdev->dev.of_node);
 
+	irq = platform_get_irq(pdev, 1);
+	devm_free_irq(&pdev->dev, irq, od);
+
 	dma_async_device_unregister(&od->ddev);
 
 	if (!od->legacy) {

+ 6 - 0
drivers/dma/pl330.c

@@ -3002,12 +3002,18 @@ static int pl330_remove(struct amba_device *adev)
 {
 	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
 	struct dma_pl330_chan *pch, *_p;
+	int i, irq;
 
 	pm_runtime_get_noresume(pl330->ddma.dev);
 
 	if (adev->dev.of_node)
 		of_dma_controller_free(adev->dev.of_node);
 
+	for (i = 0; i < AMBA_NR_IRQS; i++) {
+		irq = adev->irq[i];
+		devm_free_irq(&adev->dev, irq, pl330);
+	}
+
 	dma_async_device_unregister(&pl330->ddma);
 
 	/* Idle the DMAC */

+ 15 - 1
drivers/dma/pxa_dma.c

@@ -21,6 +21,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/of.h>
+#include <linux/wait.h>
 #include <linux/dma/pxa-dma.h>
 
 #include "dmaengine.h"
@@ -118,6 +119,8 @@ struct pxad_chan {
 	struct pxad_phy		*phy;
 	struct dma_pool		*desc_pool;	/* Descriptors pool */
 	dma_cookie_t		bus_error;
+
+	wait_queue_head_t	wq_state;
 };
 
 struct pxad_device {
@@ -318,7 +321,6 @@ static int dbg_open_##name(struct inode *inode, struct file *file) \
 	return single_open(file, dbg_show_##name, inode->i_private); \
 } \
 static const struct file_operations dbg_fops_##name = { \
-	.owner		= THIS_MODULE, \
 	.open		= dbg_open_##name, \
 	.llseek		= seq_lseek, \
 	.read		= seq_read, \
@@ -572,6 +574,7 @@ static void pxad_launch_chan(struct pxad_chan *chan,
 	 */
 	phy_writel(chan->phy, desc->first, DDADR);
 	phy_enable(chan->phy, chan->misaligned);
+	wake_up(&chan->wq_state);
 }
 
 static void set_updater_desc(struct pxad_desc_sw *sw_desc,
@@ -717,6 +720,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 		}
 	}
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
+	wake_up(&chan->wq_state);
 
 	return IRQ_HANDLED;
 }
@@ -1268,6 +1272,14 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan,
 	return ret;
 }
 
+static void pxad_synchronize(struct dma_chan *dchan)
+{
+	struct pxad_chan *chan = to_pxad_chan(dchan);
+
+	wait_event(chan->wq_state, !is_chan_running(chan));
+	vchan_synchronize(&chan->vc);
+}
+
 static void pxad_free_channels(struct dma_device *dmadev)
 {
 	struct pxad_chan *c, *cn;
@@ -1372,6 +1384,7 @@ static int pxad_init_dmadev(struct platform_device *op,
 	pdev->slave.device_tx_status = pxad_tx_status;
 	pdev->slave.device_issue_pending = pxad_issue_pending;
 	pdev->slave.device_config = pxad_config;
+	pdev->slave.device_synchronize = pxad_synchronize;
 	pdev->slave.device_terminate_all = pxad_terminate_all;
 
 	if (op->dev.coherent_dma_mask)
@@ -1389,6 +1402,7 @@ static int pxad_init_dmadev(struct platform_device *op,
 			return -ENOMEM;
 		c->vc.desc_free = pxad_free_desc;
 		vchan_init(&c->vc, &pdev->slave);
+		init_waitqueue_head(&c->wq_state);
 	}
 
 	return dma_async_device_register(&pdev->slave);
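
The pxa_dma changes wire up the device_synchronize callback: each channel gains a wait queue that is woken on launch and in the interrupt handler, and pxad_synchronize() sleeps on it until the channel stops, then calls vchan_synchronize(). From a consumer's point of view this is what makes the generic two-step teardown safe; a sketch of that usage (foo_stop_dma is hypothetical):

#include <linux/dmaengine.h>

static void foo_stop_dma(struct dma_chan *chan)
{
	/* Request termination without waiting... */
	dmaengine_terminate_async(chan);
	/* ...then block until the channel is idle and callbacks have finished. */
	dmaengine_synchronize(chan);
	/* Now it is safe to free the buffers the transfers were using. */
}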

+ 1 - 0
drivers/dma/qcom/hidma.c

@@ -708,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev)
 	pm_runtime_get_sync(dmadev->ddev.dev);
 	dma_async_device_unregister(&dmadev->ddev);
 	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+	tasklet_kill(&dmadev->task);
 	hidma_debug_uninit(dmadev);
 	hidma_ll_uninit(dmadev->lldev);
 	hidma_free(dmadev);

+ 1 - 0
drivers/dma/qcom/hidma_ll.c

@@ -831,6 +831,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
 
 	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
 	tasklet_kill(&lldev->task);
+	tasklet_kill(&lldev->rst_task);
 	memset(lldev->trepool, 0, required_bytes);
 	lldev->trepool = NULL;
 	lldev->pending_tre_count = 0;

+ 17 - 1
drivers/dma/s3c24xx-dma.c

@@ -1136,8 +1136,10 @@ static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
 	struct s3c24xx_dma_chan *next;
 
 	list_for_each_entry_safe(chan,
-				 next, &dmadev->channels, vc.chan.device_node)
+				 next, &dmadev->channels, vc.chan.device_node) {
 		list_del(&chan->vc.chan.device_node);
+		tasklet_kill(&chan->vc.task);
+	}
 }
 
 /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
@@ -1359,6 +1361,18 @@ err_memcpy:
 	return ret;
 }
 
+static void s3c24xx_dma_free_irq(struct platform_device *pdev,
+				struct s3c24xx_dma_engine *s3cdma)
+{
+	int i;
+
+	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
+		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+
+		devm_free_irq(&pdev->dev, phy->irq, phy);
+	}
+}
+
 static int s3c24xx_dma_remove(struct platform_device *pdev)
 {
 	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
@@ -1369,6 +1383,8 @@ static int s3c24xx_dma_remove(struct platform_device *pdev)
 	dma_async_device_unregister(&s3cdma->slave);
 	dma_async_device_unregister(&s3cdma->memcpy);
 
+	s3c24xx_dma_free_irq(pdev, s3cdma);
+
 	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
 	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
 

+ 1 - 0
drivers/dma/sirf-dma.c

@@ -980,6 +980,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
 	of_dma_controller_free(op->dev.of_node);
 	dma_async_device_unregister(&sdma->dma);
 	free_irq(sdma->irq, sdma);
+	tasklet_kill(&sdma->tasklet);
 	irq_dispose_mapping(sdma->irq);
 	pm_runtime_disable(&op->dev);
 	if (!pm_runtime_status_suspended(&op->dev))

+ 7 - 2
drivers/dma/txx9dmac.c

@@ -1165,9 +1165,12 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
 {
 	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
 
+
 	dma_async_device_unregister(&dc->dma);
-	if (dc->irq >= 0)
+	if (dc->irq >= 0) {
+		devm_free_irq(&pdev->dev, dc->irq, dc);
 		tasklet_kill(&dc->tasklet);
+	}
 	dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
 	return 0;
 }
@@ -1228,8 +1231,10 @@ static int txx9dmac_remove(struct platform_device *pdev)
 	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
 
 	txx9dmac_off(ddev);
-	if (ddev->irq >= 0)
+	if (ddev->irq >= 0) {
+		devm_free_irq(&pdev->dev, ddev->irq, ddev);
 		tasklet_kill(&ddev->tasklet);
+	}
 	return 0;
 }