
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:
 "This update brings:

   - the big cleanup by Maxime for device control and slave
     capabilities.  This makes the API much cleaner.

   - new IMG MDC driver by Andrew

   - new Renesas R-Car Gen2 DMA Controller driver by Laurent along with
     a bunch of fixes on rcar drivers

   - odd fixes and updates spread over drivers"

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (130 commits)
  dmaengine: pl330: add DMA_PAUSE feature
  dmaengine: pl330: improve pl330_tx_status() function
  dmaengine: rcar-dmac: Disable channel 0 when using IOMMU
  dmaengine: rcar-dmac: Work around descriptor mode IOMMU errata
  dmaengine: rcar-dmac: Allocate hardware descriptors with DMAC device
  dmaengine: rcar-dmac: Fix oops due to unintialized list in error ISR
  dmaengine: rcar-dmac: Fix spinlock issues in interrupt
  dmaenegine: edma: fix sparse warnings
  dmaengine: rcar-dmac: Fix uninitialized variable usage
  dmaengine: shdmac: extend PM methods
  dmaengine: shdmac: use SET_RUNTIME_PM_OPS()
  dmaengine: pl330: fix bug that cause start the same descs in cyclic
  dmaengine: at_xdmac: allow muliple dwidths when doing slave transfers
  dmaengine: at_xdmac: simplify channel configuration stuff
  dmaengine: at_xdmac: introduce save_cc field
  dmaengine: at_xdmac: wait for in-progress transaction to complete after pausing a channel
  ioat: fail self-test if wait_for_completion times out
  dmaengine: dw: define DW_DMA_MAX_NR_MASTERS
  dmaengine: dw: amend description of dma_dev field
  dmatest: move src_off, dst_off, len inside loop
  ...
Linus Torvalds 10 years ago
parent
commit
ce1d3fde87
71 changed files with 4736 additions and 1911 deletions
  1. Documentation/devicetree/bindings/dma/img-mdc-dma.txt (+57, -0)
  2. Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt (+0, -3)
  3. Documentation/devicetree/bindings/dma/snps-dma.txt (+1, -1)
  4. Documentation/dmaengine/provider.txt (+55, -42)
  5. MAINTAINERS (+1, -0)
  6. arch/arc/boot/dts/abilis_tb10x.dtsi (+1, -1)
  7. arch/arm/boot/dts/spear13xx.dtsi (+2, -2)
  8. arch/avr32/mach-at32ap/at32ap700x.c (+1, -1)
  9. drivers/crypto/ux500/cryp/cryp_core.c (+2, -2)
  10. drivers/crypto/ux500/hash/hash_core.c (+1, -1)
  11. drivers/dma/Kconfig (+9, -0)
  12. drivers/dma/Makefile (+2, -1)
  13. drivers/dma/amba-pl08x.c (+90, -66)
  14. drivers/dma/at_hdmac.c (+82, -48)
  15. drivers/dma/at_hdmac_regs.h (+2, -1)
  16. drivers/dma/at_xdmac.c (+98, -88)
  17. drivers/dma/bcm2835-dma.c (+11, -35)
  18. drivers/dma/coh901318.c (+70, -83)
  19. drivers/dma/cppi41.c (+1, -29)
  20. drivers/dma/dma-jz4740.c (+3, -17)
  21. drivers/dma/dmaengine.c (+62, -22)
  22. drivers/dma/dmatest.c (+18, -17)
  23. drivers/dma/dw/core.c (+61, -40)
  24. drivers/dma/dw/platform.c (+2, -2)
  25. drivers/dma/dw/regs.h (+2, -2)
  26. drivers/dma/edma.c (+23, -50)
  27. drivers/dma/ep93xx_dma.c (+8, -35)
  28. drivers/dma/fsl-edma.c (+62, -61)
  29. drivers/dma/fsldma.c (+37, -60)
  30. drivers/dma/fsldma.h (+4, -0)
  31. drivers/dma/img-mdc-dma.c (+1011, -0)
  32. drivers/dma/imx-dma.c (+51, -57)
  33. drivers/dma/imx-sdma.c (+59, -91)
  34. drivers/dma/intel_mid_dma.c (+6, -19)
  35. drivers/dma/ioat/dma_v3.c (+22, -3)
  36. drivers/dma/ioat/hw.h (+5, -0)
  37. drivers/dma/ioat/pci.c (+5, -0)
  38. drivers/dma/ipu/ipu_idmac.c (+51, -45)
  39. drivers/dma/k3dma.c (+110, -93)
  40. drivers/dma/mmp_pdma.c (+56, -53)
  41. drivers/dma/mmp_tdma.c (+46, -39)
  42. drivers/dma/moxart-dma.c (+2, -23)
  43. drivers/dma/mpc512x_dma.c (+51, -60)
  44. drivers/dma/mv_xor.c (+0, -9)
  45. drivers/dma/mxs-dma.c (+28, -37)
  46. drivers/dma/nbpfaxi.c (+51, -61)
  47. drivers/dma/of-dma.c (+4, -0)
  48. drivers/dma/omap-dma.c (+19, -50)
  49. drivers/dma/pch_dma.c (+2, -6)
  50. drivers/dma/pl330.c (+155, -75)
  51. drivers/dma/qcom_bam_dma.c (+43, -42)
  52. drivers/dma/s3c24xx-dma.c (+36, -37)
  53. drivers/dma/sa11x0-dma.c (+82, -75)
  54. drivers/dma/sh/Kconfig (+13, -1)
  55. drivers/dma/sh/Makefile (+1, -0)
  56. drivers/dma/sh/rcar-dmac.c (+1770, -0)
  57. drivers/dma/sh/rcar-hpbdma.c (+6, -0)
  58. drivers/dma/sh/shdma-base.c (+33, -39)
  59. drivers/dma/sh/shdmac.c (+15, -8)
  60. drivers/dma/sirf-dma.c (+16, -43)
  61. drivers/dma/ste_dma40.c (+30, -33)
  62. drivers/dma/sun6i-dma.c (+87, -73)
  63. drivers/dma/tegra20-apb-dma.c (+20, -22)
  64. drivers/dma/timb_dma.c (+2, -6)
  65. drivers/dma/txx9dmac.c (+2, -7)
  66. drivers/dma/xilinx/xilinx_vdma.c (+6, -23)
  67. drivers/rapidio/devices/tsi721_dma.c (+2, -6)
  68. include/linux/dmaengine.h (+58, -62)
  69. include/linux/platform_data/dma-dw.h (+4, -2)
  70. include/linux/platform_data/dma-mmp_tdma.h (+7, -0)
  71. sound/soc/soc-generic-dmaengine-pcm.c (+1, -1)

+ 57 - 0
Documentation/devicetree/bindings/dma/img-mdc-dma.txt

@@ -0,0 +1,57 @@
+* IMG Multi-threaded DMA Controller (MDC)
+
+Required properties:
+- compatible: Must be "img,pistachio-mdc-dma".
+- reg: Must contain the base address and length of the MDC registers.
+- interrupts: Must contain all the per-channel DMA interrupts.
+- clocks: Must contain an entry for each entry in clock-names.
+  See ../clock/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+  - sys: MDC system interface clock.
+- img,cr-periph: Must contain a phandle to the peripheral control syscon
+  node which contains the DMA request to channel mapping registers.
+- img,max-burst-multiplier: Must be the maximum supported burst size multiplier.
+  The maximum burst size is this value multiplied by the hardware-reported bus
+  width.
+- #dma-cells: Must be 3:
+  - The first cell is the peripheral's DMA request line.
+  - The second cell is a bitmap specifying to which channels the DMA request
+    line may be mapped (i.e. bit N set indicates channel N is usable).
+  - The third cell is the thread ID to be used by the channel.
+
+Optional properties:
+- dma-channels: Number of supported DMA channels, up to 32.  If not specified
+  the number reported by the hardware is used.
+
+Example:
+
+mdc: dma-controller@18143000 {
+	compatible = "img,pistachio-mdc-dma";
+	reg = <0x18143000 0x1000>;
+	interrupts = <GIC_SHARED 27 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 28 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 30 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 31 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 32 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 33 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 34 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 35 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 36 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 37 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 38 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&system_clk>;
+	clock-names = "sys";
+
+	img,max-burst-multiplier = <16>;
+	img,cr-periph = <&cr_periph>;
+
+	#dma-cells = <3>;
+};
+
+spi@18100f00 {
+	...
+	dmas = <&mdc 9 0xffffffff 0>, <&mdc 10 0xffffffff 0>;
+	dma-names = "tx", "rx";
+	...
+};

+ 0 - 3
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt

@@ -5,9 +5,6 @@ controller instances named DMAC capable of serving multiple clients. Channels
 can be dedicated to specific clients or shared between a large number of
 clients.
 
-DMA clients are connected to the DMAC ports referenced by an 8-bit identifier
-called MID/RID.
-
 Each DMA client is connected to one dedicated port of the DMAC, identified by
 an 8-bit port number called the MID/RID. A DMA controller can thus serve up to
 256 clients in total. When the number of hardware channels is lower than the

+ 1 - 1
Documentation/devicetree/bindings/dma/snps-dma.txt

@@ -38,7 +38,7 @@ Example:
 		chan_allocation_order = <1>;
 		chan_priority = <1>;
 		block_size = <0xfff>;
-		data_width = <3 3 0 0>;
+		data_width = <3 3>;
 	};
 
 DMA clients connected to the Designware DMA controller must use the format

+ 55 - 42
Documentation/dmaengine/provider.txt

@@ -113,6 +113,31 @@ need to initialize a few fields in there:
   * channels:	should be initialized as a list using the
 		INIT_LIST_HEAD macro for example
 
+  * src_addr_widths:
+    - should contain a bitmask of the supported source transfer width
+
+  * dst_addr_widths:
+    - should contain a bitmask of the supported destination transfer
+      width
+
+  * directions:
+    - should contain a bitmask of the supported slave directions
+      (i.e. excluding mem2mem transfers)
+
+  * residue_granularity:
+    - Granularity of the transfer residue reported to dma_set_residue.
+    - This can be either:
+      + Descriptor
+        -> Your device doesn't support any kind of residue
+           reporting. The framework will only know that a particular
+           transaction descriptor is done.
+      + Segment
+        -> Your device is able to report which chunks have been
+           transferred
+      + Burst
+        -> Your device is able to report which burst have been
+           transferred
+
   * dev: 	should hold the pointer to the struct device associated
 		to your current driver instance.
 
@@ -274,48 +299,36 @@ supported.
        account the current period.
      - This function can be called in an interrupt context.
 
-   * device_control
-     - Used by client drivers to control and configure the channel it
-       has a handle on.
-     - Called with a command and an argument
-       + The command is one of the values listed by the enum
-         dma_ctrl_cmd. The valid commands are:
-         + DMA_PAUSE
-           + Pauses a transfer on the channel
-           + This command should operate synchronously on the channel,
-             pausing right away the work of the given channel
-         + DMA_RESUME
-           + Restarts a transfer on the channel
-           + This command should operate synchronously on the channel,
-             resuming right away the work of the given channel
-         + DMA_TERMINATE_ALL
-           + Aborts all the pending and ongoing transfers on the
-             channel
-           + This command should operate synchronously on the channel,
-             terminating right away all the channels
-         + DMA_SLAVE_CONFIG
-           + Reconfigures the channel with passed configuration
-           + This command should NOT perform synchronously, or on any
-             currently queued transfers, but only on subsequent ones
-           + In this case, the function will receive a
-             dma_slave_config structure pointer as an argument, that
-             will detail which configuration to use.
-           + Even though that structure contains a direction field,
-             this field is deprecated in favor of the direction
-             argument given to the prep_* functions
-         + FSLDMA_EXTERNAL_START
-           + TODO: Why does that even exist?
-       + The argument is an opaque unsigned long. This actually is a
-         pointer to a struct dma_slave_config that should be used only
-         in the DMA_SLAVE_CONFIG.
-
-  * device_slave_caps
-    - Called through the framework by client drivers in order to have
-      an idea of what are the properties of the channel allocated to
-      them.
-    - Such properties are the buswidth, available directions, etc.
-    - Required for every generic layer doing DMA transfers, such as
-      ASoC.
+   * device_config
+     - Reconfigures the channel with the configuration given as
+       argument
+     - This command should NOT perform synchronously, or on any
+       currently queued transfers, but only on subsequent ones
+     - In this case, the function will receive a dma_slave_config
+       structure pointer as an argument, that will detail which
+       configuration to use.
+     - Even though that structure contains a direction field, this
+       field is deprecated in favor of the direction argument given to
+       the prep_* functions
+     - This call is mandatory for slave operations only. This should NOT be
+       set or expected to be set for memcpy operations.
+       If a driver support both, it should use this call for slave
+       operations only and not for memcpy ones.
+
+   * device_pause
+     - Pauses a transfer on the channel
+     - This command should operate synchronously on the channel,
+       pausing right away the work of the given channel
+
+   * device_resume
+     - Resumes a transfer on the channel
+     - This command should operate synchronously on the channel,
+       pausing right away the work of the given channel
+
+   * device_terminate_all
+     - Aborts all the pending and ongoing transfers on the channel
+     - This command should operate synchronously on the channel,
+       terminating right away all the channels
 
 
 Misc notes (stuff that should be documented, but don't really know
 where to put them)
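
For orientation, here is a minimal sketch of what a provider registers under the new interface (illustrative only, not part of this commit; the foo_* names are placeholders, modeled on the at_hdmac/at_xdmac conversions further down). Per-operation callbacks replace the single device_control(cmd, arg) hook, and capability fields on struct dma_device replace device_slave_caps():

static int foo_config(struct dma_chan *chan, struct dma_slave_config *cfg);
static int foo_pause(struct dma_chan *chan);
static int foo_resume(struct dma_chan *chan);
static int foo_terminate_all(struct dma_chan *chan);

static void foo_init_dma_device(struct dma_device *dd)
{
	/* One callback per operation instead of device_control(cmd, arg). */
	dd->device_config		= foo_config;
	dd->device_pause		= foo_pause;
	dd->device_resume		= foo_resume;
	dd->device_terminate_all	= foo_terminate_all;

	/* Slave capabilities are now plain fields on struct dma_device. */
	dd->src_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions			= BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity		= DMA_RESIDUE_GRANULARITY_BURST;
}

Clients, in turn, use helpers such as dmaengine_terminate_all(chan) instead of dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0), as the ux500 crypto changes below show.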

+ 1 - 0
MAINTAINERS

@@ -8503,6 +8503,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
 M:	Viresh Kumar <viresh.linux@gmail.com>
 M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:	Maintained
+F:	include/linux/dma/dw.h
 F:	include/linux/platform_data/dma-dw.h
 F:	drivers/dma/dw/
 

+ 1 - 1
arch/arc/boot/dts/abilis_tb10x.dtsi

@@ -112,7 +112,7 @@
 			chan_allocation_order = <0>;
 			chan_priority = <1>;
 			block_size = <0x7ff>;
-			data_width = <2 0 0 0>;
+			data_width = <2>;
 			clocks = <&ahb_clk>;
 			clock-names = "hclk";
 		};

+ 2 - 2
arch/arm/boot/dts/spear13xx.dtsi

@@ -117,7 +117,7 @@
 			chan_priority = <1>;
 			block_size = <0xfff>;
 			dma-masters = <2>;
-			data_width = <3 3 0 0>;
+			data_width = <3 3>;
 		};
 
 		dma@eb000000 {
@@ -133,7 +133,7 @@
 			chan_allocation_order = <1>;
 			chan_priority = <1>;
 			block_size = <0xfff>;
-			data_width = <3 3 0 0>;
+			data_width = <3 3>;
 		};
 
 		fsmc: flash@b0000000 {

+ 1 - 1
arch/avr32/mach-at32ap/at32ap700x.c

@@ -607,7 +607,7 @@ static struct dw_dma_platform_data dw_dmac0_data = {
 	.nr_channels	= 3,
 	.block_size	= 4095U,
 	.nr_masters	= 2,
-	.data_width	= { 2, 2, 0, 0 },
+	.data_width	= { 2, 2 },
 };
 
 static struct resource dw_dmac0_resource[] = {

+ 2 - 2
drivers/crypto/ux500/cryp/cryp_core.c

@@ -606,12 +606,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
 
 	chan = ctx->device->dma.chan_mem2cryp;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
 		     ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
 
 	chan = ctx->device->dma.chan_cryp2mem;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
 		     ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
 }

+ 1 - 1
drivers/crypto/ux500/hash/hash_core.c

@@ -202,7 +202,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
 	struct dma_chan *chan;
 
 	chan = ctx->device->dma.chan_mem2hash;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
 		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
 }

+ 9 - 0
drivers/dma/Kconfig

@@ -416,6 +416,15 @@ config NBPFAXI_DMA
 	help
 	  Support for "Type-AXI" NBPF DMA IPs from Renesas
 
+config IMG_MDC_DMA
+	tristate "IMG MDC support"
+	depends on MIPS || COMPILE_TEST
+	depends on MFD_SYSCON
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the IMG multi-threaded DMA controller (MDC).
+
 config DMA_ENGINE
 	bool
 

+ 2 - 1
drivers/dma/Makefile

@@ -19,7 +19,7 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE_BASE) += sh/
+obj-$(CONFIG_RENESAS_DMA) += sh/
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
@@ -50,3 +50,4 @@ obj-y += xilinx/
 obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
 obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
+obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o

+ 90 - 66
drivers/dma/amba-pl08x.c

@@ -1386,32 +1386,6 @@ static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
 	return pl08x_cctl(cctl);
 }
 
-static int dma_set_runtime_config(struct dma_chan *chan,
-				  struct dma_slave_config *config)
-{
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
-
-	if (!plchan->slave)
-		return -EINVAL;
-
-	/* Reject definitely invalid configurations */
-	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
-	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
-		return -EINVAL;
-
-	if (config->device_fc && pl08x->vd->pl080s) {
-		dev_err(&pl08x->adev->dev,
-			"%s: PL080S does not support peripheral flow control\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	plchan->cfg = *config;
-
-	return 0;
-}
-
 /*
  * Slave transactions callback to the slave device to allow
  * synchronization of slave DMA signals with the DMAC enable
@@ -1693,20 +1667,71 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
 	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
-static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			 unsigned long arg)
+static int pl08x_config(struct dma_chan *chan,
+			struct dma_slave_config *config)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	if (!plchan->slave)
+		return -EINVAL;
+
+	/* Reject definitely invalid configurations */
+	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	if (config->device_fc && pl08x->vd->pl080s) {
+		dev_err(&pl08x->adev->dev,
+			"%s: PL080S does not support peripheral flow control\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	plchan->cfg = *config;
+
+	return 0;
+}
+
+static int pl08x_terminate_all(struct dma_chan *chan)
 {
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	unsigned long flags;
 
 
-	/* Controls applicable to inactive channels */
-	if (cmd == DMA_SLAVE_CONFIG) {
-		return dma_set_runtime_config(chan,
-					      (struct dma_slave_config *)arg);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
+	if (!plchan->phychan && !plchan->at) {
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
+		return 0;
 	}
 	}
 
+
+	if (plchan->phychan) {
+		/*
+		 * Mark physical channel as free and free any slave
+		 * signal
+		 */
+		pl08x_phy_free(plchan);
+	}
+	/* Dequeue jobs and free LLIs */
+	if (plchan->at) {
+		pl08x_desc_free(&plchan->at->vd);
+		plchan->at = NULL;
+	}
+	/* Dequeue jobs not yet fired as well */
+	pl08x_free_txd_list(pl08x, plchan);
+
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int pl08x_pause(struct dma_chan *chan)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	unsigned long flags;
+
 	/*
 	/*
 	 * Anything succeeds on channels with no physical allocation and
 	 * Anything succeeds on channels with no physical allocation and
 	 * no queued transfers.
 	 * no queued transfers.
@@ -1717,42 +1742,35 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		return 0;
 		return 0;
 	}
 
-	case DMA_TERMINATE_ALL:
-		plchan->state = PL08X_CHAN_IDLE;
+	pl08x_pause_phy_chan(plchan->phychan);
+	plchan->state = PL08X_CHAN_PAUSED;
 
 
-		if (plchan->phychan) {
-			/*
-			 * Mark physical channel as free and free any slave
-			 * signal
-			 */
-			pl08x_phy_free(plchan);
-		}
-		/* Dequeue jobs and free LLIs */
-		if (plchan->at) {
-			pl08x_desc_free(&plchan->at->vd);
-			plchan->at = NULL;
-		}
-		/* Dequeue jobs not yet fired as well */
-		pl08x_free_txd_list(pl08x, plchan);
-		break;
-	case DMA_PAUSE:
-		pl08x_pause_phy_chan(plchan->phychan);
-		plchan->state = PL08X_CHAN_PAUSED;
-		break;
-	case DMA_RESUME:
-		pl08x_resume_phy_chan(plchan->phychan);
-		plchan->state = PL08X_CHAN_RUNNING;
-		break;
-	default:
-		/* Unknown command */
-		ret = -ENXIO;
-		break;
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int pl08x_resume(struct dma_chan *chan)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	unsigned long flags;
+
+	/*
+	 * Anything succeeds on channels with no physical allocation and
+	 * no queued transfers.
+	 */
+	spin_lock_irqsave(&plchan->vc.lock, flags);
+	if (!plchan->phychan && !plchan->at) {
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
+		return 0;
 	}
 
+	pl08x_resume_phy_chan(plchan->phychan);
+	plchan->state = PL08X_CHAN_RUNNING;
+
 	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
-	return ret;
+	return 0;
 }
 
 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2048,7 +2066,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
 	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
 	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
-	pl08x->memcpy.device_control = pl08x_control;
+	pl08x->memcpy.device_config = pl08x_config;
+	pl08x->memcpy.device_pause = pl08x_pause;
+	pl08x->memcpy.device_resume = pl08x_resume;
+	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
 
 
 	/* Initialize slave engine */
 	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2061,7 +2082,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->slave.device_issue_pending = pl08x_issue_pending;
 	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
 	pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
-	pl08x->slave.device_control = pl08x_control;
+	pl08x->slave.device_config = pl08x_config;
+	pl08x->slave.device_pause = pl08x_pause;
+	pl08x->slave.device_resume = pl08x_resume;
+	pl08x->slave.device_terminate_all = pl08x_terminate_all;
 
 
 	/* Get the platform data */
 	pl08x->pd = dev_get_platdata(&adev->dev);

+ 82 - 48
drivers/dma/at_hdmac.c

@@ -42,6 +42,11 @@
 #define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
 #define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
 				|ATC_DIF(AT_DMA_MEM_IF))
+#define ATC_DMA_BUSWIDTHS\
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
 
 /*
  * Initial number of descriptors to allocate for each channel. This could
@@ -972,11 +977,13 @@ err_out:
 	return NULL;
 }
 
-static int set_runtime_config(struct dma_chan *chan,
-			      struct dma_slave_config *sconfig)
+static int atc_config(struct dma_chan *chan,
+		      struct dma_slave_config *sconfig)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
 	/* Check if it is chan is configured for slave transfers */
 	if (!chan->private)
 		return -EINVAL;
@@ -989,9 +996,28 @@ static int set_runtime_config(struct dma_chan *chan,
 	return 0;
 }
 
+static int atc_pause(struct dma_chan *chan)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma		*atdma = to_at_dma(chan->device);
+	int			chan_id = atchan->chan_common.chan_id;
+	unsigned long		flags;
 
 
-static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		       unsigned long arg)
+	LIST_HEAD(list);
+
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+	spin_lock_irqsave(&atchan->lock, flags);
+
+	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+	set_bit(ATC_IS_PAUSED, &atchan->status);
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
+	return 0;
+}
+
+static int atc_resume(struct dma_chan *chan)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
@@ -1000,60 +1026,61 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 
 	LIST_HEAD(list);
 
-	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
 
-	if (cmd == DMA_PAUSE) {
-		spin_lock_irqsave(&atchan->lock, flags);
+	if (!atc_chan_is_paused(atchan))
+		return 0;
 
 
-		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
-		set_bit(ATC_IS_PAUSED, &atchan->status);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 
-		spin_unlock_irqrestore(&atchan->lock, flags);
-	} else if (cmd == DMA_RESUME) {
-		if (!atc_chan_is_paused(atchan))
-			return 0;
+	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+	clear_bit(ATC_IS_PAUSED, &atchan->status);
 
 
-		spin_lock_irqsave(&atchan->lock, flags);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 
-		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
-		clear_bit(ATC_IS_PAUSED, &atchan->status);
+	return 0;
+}
 
 
-		spin_unlock_irqrestore(&atchan->lock, flags);
-	} else if (cmd == DMA_TERMINATE_ALL) {
-		struct at_desc	*desc, *_desc;
-		/*
-		 * This is only called when something went wrong elsewhere, so
-		 * we don't really care about the data. Just disable the
-		 * channel. We still have to poll the channel enable bit due
-		 * to AHB/HSB limitations.
-		 */
-		spin_lock_irqsave(&atchan->lock, flags);
+static int atc_terminate_all(struct dma_chan *chan)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma		*atdma = to_at_dma(chan->device);
+	int			chan_id = atchan->chan_common.chan_id;
+	struct at_desc		*desc, *_desc;
+	unsigned long		flags;
 
 
-		/* disabling channel: must also remove suspend state */
-		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+	LIST_HEAD(list);
 
 
-		/* confirm that this channel is disabled */
-		while (dma_readl(atdma, CHSR) & atchan->mask)
-			cpu_relax();
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
 
-		/* active_list entries will end up before queued entries */
-		list_splice_init(&atchan->queue, &list);
-		list_splice_init(&atchan->active_list, &list);
+	/*
+	 * This is only called when something went wrong elsewhere, so
+	 * we don't really care about the data. Just disable the
+	 * channel. We still have to poll the channel enable bit due
+	 * to AHB/HSB limitations.
+	 */
+	spin_lock_irqsave(&atchan->lock, flags);
 
 
-		/* Flush all pending and queued descriptors */
-		list_for_each_entry_safe(desc, _desc, &list, desc_node)
-			atc_chain_complete(atchan, desc);
+	/* disabling channel: must also remove suspend state */
+	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
 
 
-		clear_bit(ATC_IS_PAUSED, &atchan->status);
-		/* if channel dedicated to cyclic operations, free it */
-		clear_bit(ATC_IS_CYCLIC, &atchan->status);
+	/* confirm that this channel is disabled */
+	while (dma_readl(atdma, CHSR) & atchan->mask)
+		cpu_relax();
 
 
-		spin_unlock_irqrestore(&atchan->lock, flags);
-	} else if (cmd == DMA_SLAVE_CONFIG) {
-		return set_runtime_config(chan, (struct dma_slave_config *)arg);
-	} else {
-		return -ENXIO;
-	}
+	/* active_list entries will end up before queued entries */
+	list_splice_init(&atchan->queue, &list);
+	list_splice_init(&atchan->active_list, &list);
+
+	/* Flush all pending and queued descriptors */
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		atc_chain_complete(atchan, desc);
+
+	clear_bit(ATC_IS_PAUSED, &atchan->status);
+	/* if channel dedicated to cyclic operations, free it */
+	clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 
 	return 0;
 }
@@ -1505,7 +1532,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		/* controller can do slave DMA: can trigger cyclic transfers */
 		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
 		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-		atdma->dma_common.device_control = atc_control;
+		atdma->dma_common.device_config = atc_config;
+		atdma->dma_common.device_pause = atc_pause;
+		atdma->dma_common.device_resume = atc_resume;
+		atdma->dma_common.device_terminate_all = atc_terminate_all;
+		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
+		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
+		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	}
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
@@ -1622,7 +1656,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan)
 	if (!atc_chan_is_paused(atchan)) {
 		dev_warn(chan2dev(chan),
 		"cyclic channel not paused, should be done by channel user\n");
-		atc_control(chan, DMA_PAUSE, 0);
+		atc_pause(chan);
 	}
 
 	/* now preserve additional data for cyclic operations */

+ 2 - 1
drivers/dma/at_hdmac_regs.h

@@ -232,7 +232,8 @@ enum atc_status {
  * @save_dscr: for cyclic operations, preserve next descriptor address in
  *             the cyclic list on suspend/resume cycle
  * @remain_desc: to save remain desc length
- * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG
+ * @dma_sconfig: configuration for slave transfers, passed via
+ * .device_config
  * @lock: serializes enqueue/dequeue operations to descriptors lists
  * @active_list: list of descriptors dmaengine is being running on
  * @queue: list of descriptors ready to be submitted to engine

+ 98 - 88
drivers/dma/at_xdmac.c

@@ -25,6 +25,7 @@
 #include <linux/dmapool.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of_dma.h>
@@ -174,6 +175,13 @@
 
 
 #define AT_XDMAC_MAX_CHAN	0x20
 
+#define AT_XDMAC_DMA_BUSWIDTHS\
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
 enum atc_status {
 	AT_XDMAC_CHAN_IS_CYCLIC = 0,
 	AT_XDMAC_CHAN_IS_PAUSED,
@@ -184,15 +192,15 @@ struct at_xdmac_chan {
 	struct dma_chan			chan;
 	void __iomem			*ch_regs;
 	u32				mask;		/* Channel Mask */
-	u32				cfg[3];		/* Channel Configuration Register */
-	#define	AT_XDMAC_CUR_CFG	0		/* Current channel conf */
-	#define	AT_XDMAC_DEV_TO_MEM_CFG	1		/* Predifined dev to mem channel conf */
-	#define	AT_XDMAC_MEM_TO_DEV_CFG	2		/* Predifined mem to dev channel conf */
+	u32				cfg[2];		/* Channel Configuration Register */
+	#define	AT_XDMAC_DEV_TO_MEM_CFG	0		/* Predifined dev to mem channel conf */
+	#define	AT_XDMAC_MEM_TO_DEV_CFG	1		/* Predifined mem to dev channel conf */
 	u8				perid;		/* Peripheral ID */
 	u8				perif;		/* Peripheral Interface */
 	u8				memif;		/* Memory Interface */
 	u32				per_src_addr;
 	u32				per_dst_addr;
+	u32				save_cc;
 	u32				save_cim;
 	u32				save_cnda;
 	u32				save_cndc;
@@ -344,20 +352,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
 	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
 
 	/*
-	 * When doing memory to memory transfer we need to use the next
+	 * When doing non cyclic transfer we need to use the next
 	 * descriptor view 2 since some fields of the configuration register
 	 * depend on transfer size and src/dest addresses.
 	 */
-	if (is_slave_direction(first->direction)) {
+	if (at_xdmac_chan_is_cyclic(atchan)) {
 		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
-		if (first->direction == DMA_MEM_TO_DEV)
-			atchan->cfg[AT_XDMAC_CUR_CFG] =
-				atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
-		else
-			atchan->cfg[AT_XDMAC_CUR_CFG] =
-				atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
-		at_xdmac_chan_write(atchan, AT_XDMAC_CC,
-				    atchan->cfg[AT_XDMAC_CUR_CFG]);
+		at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
 	} else {
 		/*
 		 * No need to write AT_XDMAC_CC reg, it will be done when the
@@ -561,7 +562,6 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
 	struct scatterlist	*sg;
 	int			i;
-	u32			cfg;
 	unsigned int		xfer_size = 0;
 
 	if (!sgl)
@@ -583,7 +583,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	/* Prepare descriptors. */
 	for_each_sg(sgl, sg, sg_len, i) {
 		struct at_xdmac_desc	*desc = NULL;
-		u32			len, mem;
+		u32			len, mem, dwidth, fixed_dwidth;
 
 
 		len = sg_dma_len(sg);
 		mem = sg_dma_address(sg);
@@ -608,17 +608,21 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		if (direction == DMA_DEV_TO_MEM) {
 			desc->lld.mbr_sa = atchan->per_src_addr;
 			desc->lld.mbr_da = mem;
-			cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 		} else {
 			desc->lld.mbr_sa = mem;
 			desc->lld.mbr_da = atchan->per_dst_addr;
-			cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
 		}
-		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1		/* next descriptor view */
-			| AT_XDMAC_MBR_UBC_NDEN				/* next descriptor dst parameter update */
-			| AT_XDMAC_MBR_UBC_NSEN				/* next descriptor src parameter update */
-			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)	/* descriptor fetch */
-			| len / (1 << at_xdmac_get_dwidth(cfg));	/* microblock length */
+		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
+			       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+			       : AT_XDMAC_CC_DWIDTH_BYTE;
+		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2			/* next descriptor view */
+			| AT_XDMAC_MBR_UBC_NDEN					/* next descriptor dst parameter update */
+			| AT_XDMAC_MBR_UBC_NSEN					/* next descriptor src parameter update */
+			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)		/* descriptor fetch */
+			| (len >> fixed_dwidth);				/* microblock length */
 		dev_dbg(chan2dev(chan),
 			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
 			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
@@ -882,7 +886,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	enum dma_status		ret;
 	int			residue;
 	u32			cur_nda, mask, value;
-	u8			dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]);
+	u8			dwidth = 0;
 
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -912,7 +916,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
 	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
-	if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) {
+	if ((desc->lld.mbr_cfg & mask) == value) {
 		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
 		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
 			cpu_relax();
@@ -926,6 +930,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	descs_list = &desc->descs_list;
 	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
+		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
 		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
 			break;
@@ -1107,58 +1112,80 @@ static void at_xdmac_issue_pending(struct dma_chan *chan)
 	return;
 }
 
-static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			    unsigned long arg)
+static int at_xdmac_device_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	int ret;
+
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+	spin_lock_bh(&atchan->lock);
+	ret = at_xdmac_set_slave_config(chan, config);
+	spin_unlock_bh(&atchan->lock);
+
+	return ret;
+}
+
+static int at_xdmac_device_pause(struct dma_chan *chan)
 {
-	struct at_xdmac_desc	*desc, *_desc;
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
-	int			ret = 0;
 
 
-	dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
+		return 0;
 
 
 	spin_lock_bh(&atchan->lock);
+	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
+	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
+		cpu_relax();
+	spin_unlock_bh(&atchan->lock);
 
 
-	switch (cmd) {
-	case DMA_PAUSE:
-		at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
-		set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-		break;
+	return 0;
+}
 
 
-	case DMA_RESUME:
-		if (!at_xdmac_chan_is_paused(atchan))
-			break;
+static int at_xdmac_device_resume(struct dma_chan *chan)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
 
 
-		at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
-		clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-		break;
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
 
-	case DMA_TERMINATE_ALL:
-		at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
-		while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
-			cpu_relax();
+	spin_lock_bh(&atchan->lock);
+	if (!at_xdmac_chan_is_paused(atchan))
+		return 0;
+
+	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+	spin_unlock_bh(&atchan->lock);
 
 
-		/* Cancel all pending transfers. */
-		list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
-			at_xdmac_remove_xfer(atchan, desc);
+	return 0;
+}
+
+static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+{
+	struct at_xdmac_desc	*desc, *_desc;
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
 
 
-		clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-		break;
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
 
-	case DMA_SLAVE_CONFIG:
-		ret = at_xdmac_set_slave_config(chan,
-				(struct dma_slave_config *)arg);
-		break;
+	spin_lock_bh(&atchan->lock);
+	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+		cpu_relax();
 
 
-	default:
-		dev_err(chan2dev(chan),
-			"unmanaged or unknown dma control cmd: %d\n", cmd);
-		ret = -ENXIO;
-	}
+	/* Cancel all pending transfers. */
+	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+		at_xdmac_remove_xfer(atchan, desc);
 
 
+	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
 	spin_unlock_bh(&atchan->lock);
 
-	return ret;
+	return 0;
 }
 
 static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
@@ -1217,27 +1244,6 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan)
 	return;
 }
 
-#define AT_XDMAC_DMA_BUSWIDTHS\
-	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
-	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
-	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
-	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
-	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
-				      struct dma_slave_caps *caps)
-{
-
-	caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int atmel_xdmac_prepare(struct device *dev)
 {
@@ -1268,9 +1274,10 @@ static int atmel_xdmac_suspend(struct device *dev)
 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
 		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 
+		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
 		if (at_xdmac_chan_is_cyclic(atchan)) {
 			if (!at_xdmac_chan_is_paused(atchan))
-				at_xdmac_control(chan, DMA_PAUSE, 0);
+				at_xdmac_device_pause(chan);
 			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
 			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
 			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
@@ -1290,7 +1297,6 @@ static int atmel_xdmac_resume(struct device *dev)
 	struct at_xdmac_chan	*atchan;
 	struct dma_chan		*chan, *_chan;
 	int			i;
-	u32			cfg;
 
 
 	clk_prepare_enable(atxdmac->clk);
 
@@ -1305,8 +1311,7 @@ static int atmel_xdmac_resume(struct device *dev)
 	at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
 		atchan = to_at_xdmac_chan(chan);
-		cfg = atchan->cfg[AT_XDMAC_CUR_CFG];
-		at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg);
+		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
 		if (at_xdmac_chan_is_cyclic(atchan)) {
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
@@ -1407,8 +1412,14 @@ static int at_xdmac_probe(struct platform_device *pdev)
 	atxdmac->dma.device_prep_dma_cyclic		= at_xdmac_prep_dma_cyclic;
 	atxdmac->dma.device_prep_dma_memcpy		= at_xdmac_prep_dma_memcpy;
 	atxdmac->dma.device_prep_slave_sg		= at_xdmac_prep_slave_sg;
-	atxdmac->dma.device_control			= at_xdmac_control;
-	atxdmac->dma.device_slave_caps			= at_xdmac_device_slave_caps;
+	atxdmac->dma.device_config			= at_xdmac_device_config;
+	atxdmac->dma.device_pause			= at_xdmac_device_pause;
+	atxdmac->dma.device_resume			= at_xdmac_device_resume;
+	atxdmac->dma.device_terminate_all		= at_xdmac_device_terminate_all;
+	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 
 
 	/* Disable all chans and interrupts. */
 	at_xdmac_off(atxdmac);
@@ -1507,7 +1518,6 @@ static struct platform_driver at_xdmac_driver = {
 	.remove		= at_xdmac_remove,
 	.driver = {
 		.name		= "at_xdmac",
-		.owner		= THIS_MODULE,
 		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
 		.pm		= &atmel_xdmac_dev_pm_ops,
 	}

+ 11 - 35
drivers/dma/bcm2835-dma.c

@@ -436,9 +436,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	return vchan_tx_prep(&c->vc, &d->vd, flags);
 }
 
-static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
-		struct dma_slave_config *cfg)
+static int bcm2835_dma_slave_config(struct dma_chan *chan,
+				    struct dma_slave_config *cfg)
 {
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
 	if ((cfg->direction == DMA_DEV_TO_MEM &&
 	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
 	    (cfg->direction == DMA_MEM_TO_DEV &&
@@ -452,8 +454,9 @@ static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
 	return 0;
 }
 
-static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 {
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
 	unsigned long flags;
 	int timeout = 10000;
@@ -495,24 +498,6 @@ static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
 	return 0;
 }
 
-static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		return bcm2835_dma_slave_config(c,
-				(struct dma_slave_config *)arg);
-
-	case DMA_TERMINATE_ALL:
-		return bcm2835_dma_terminate_all(c);
-
-	default:
-		return -ENXIO;
-	}
-}
-
 static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
 {
 	struct bcm2835_chan *c;
@@ -565,18 +550,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
 	return chan;
 }
 
-static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
-	struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = false;
-	caps->cmd_terminate = true;
-
-	return 0;
-}
-
 static int bcm2835_dma_probe(struct platform_device *pdev)
 {
 	struct bcm2835_dmadev *od;
@@ -615,9 +588,12 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
 	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
 	od->ddev.device_tx_status = bcm2835_dma_tx_status;
 	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
-	od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
 	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
-	od->ddev.device_control = bcm2835_dma_control;
+	od->ddev.device_config = bcm2835_dma_slave_config;
+	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	od->ddev.dev = &pdev->dev;
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
 	spin_lock_init(&od->lock);
+ 70 - 83
drivers/dma/coh901318.c

@@ -1690,7 +1690,7 @@ static u32 coh901318_get_bytes_left(struct dma_chan *chan)
  * Pauses a transfer without losing data. Enables power save.
  * Use this function in conjunction with coh901318_resume.
  */
-static void coh901318_pause(struct dma_chan *chan)
+static int coh901318_pause(struct dma_chan *chan)
 {
 	u32 val;
 	unsigned long flags;
@@ -1730,12 +1730,13 @@ static void coh901318_pause(struct dma_chan *chan)
 	enable_powersave(cohc);
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
+	return 0;
 }
 
 /* Resumes a transfer that has been stopped via 300_dma_stop(..).
    Power save is handled.
 */
-static void coh901318_resume(struct dma_chan *chan)
+static int coh901318_resume(struct dma_chan *chan)
 {
 	u32 val;
 	unsigned long flags;
@@ -1760,6 +1761,7 @@ static void coh901318_resume(struct dma_chan *chan)
 	}
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
+	return 0;
 }
 
 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2114,6 +2116,57 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static int coh901318_terminate_all(struct dma_chan *chan)
+{
+	unsigned long flags;
+	struct coh901318_chan *cohc = to_coh901318_chan(chan);
+	struct coh901318_desc *cohd;
+	void __iomem *virtbase = cohc->base->virtbase;
+
+	/* The remainder of this function terminates the transfer */
+	coh901318_pause(chan);
+	spin_lock_irqsave(&cohc->lock, flags);
+
+	/* Clear any pending BE or TC interrupt */
+	if (cohc->id < 32) {
+		writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
+		writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
+	} else {
+		writel(1 << (cohc->id - 32), virtbase +
+		       COH901318_BE_INT_CLEAR2);
+		writel(1 << (cohc->id - 32), virtbase +
+		       COH901318_TC_INT_CLEAR2);
+	}
+
+	enable_powersave(cohc);
+
+	while ((cohd = coh901318_first_active_get(cohc))) {
+		/* release the lli allocation*/
+		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
+
+		/* return desc to free-list */
+		coh901318_desc_remove(cohd);
+		coh901318_desc_free(cohc, cohd);
+	}
+
+	while ((cohd = coh901318_first_queued(cohc))) {
+		/* release the lli allocation*/
+		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
+
+		/* return desc to free-list */
+		coh901318_desc_remove(cohd);
+		coh901318_desc_free(cohc, cohd);
+	}
+
+
+	cohc->nbr_active_done = 0;
+	cohc->busy = 0;
+
+	spin_unlock_irqrestore(&cohc->lock, flags);
+
+	return 0;
+}
+
 static int coh901318_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct coh901318_chan	*cohc = to_coh901318_chan(chan);
@@ -2156,7 +2209,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 
-	dmaengine_terminate_all(chan);
+	coh901318_terminate_all(chan);
 }
 
 
@@ -2461,8 +2514,8 @@ static const struct burst_table burst_sizes[] = {
 	},
 };
 
-static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
-			struct dma_slave_config *config)
+static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
+					   struct dma_slave_config *config)
 {
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	dma_addr_t addr;
@@ -2482,7 +2535,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 		maxburst = config->dst_maxburst;
 	} else {
 		dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
-		return;
+		return -EINVAL;
 	}
 
 	dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
@@ -2528,7 +2581,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 	default:
 		dev_err(COHC_2_DEV(cohc),
 			"bad runtimeconfig: alien address width\n");
-		return;
+		return -EINVAL;
 	}
 
 	ctrl |= burst_sizes[i].reg;
@@ -2538,84 +2591,12 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 
 	cohc->addr = addr;
 	cohc->ctrl = ctrl;
-}
-
-static int
-coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		  unsigned long arg)
-{
-	unsigned long flags;
-	struct coh901318_chan *cohc = to_coh901318_chan(chan);
-	struct coh901318_desc *cohd;
-	void __iomem *virtbase = cohc->base->virtbase;
-
-	if (cmd == DMA_SLAVE_CONFIG) {
-		struct dma_slave_config *config =
-			(struct dma_slave_config *) arg;
-
-		coh901318_dma_set_runtimeconfig(chan, config);
-		return 0;
-	  }
-
-	if (cmd == DMA_PAUSE) {
-		coh901318_pause(chan);
-		return 0;
-	}
-
-	if (cmd == DMA_RESUME) {
-		coh901318_resume(chan);
-		return 0;
-	}
-
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
-	/* The remainder of this function terminates the transfer */
-	coh901318_pause(chan);
-	spin_lock_irqsave(&cohc->lock, flags);
-
-	/* Clear any pending BE or TC interrupt */
-	if (cohc->id < 32) {
-		writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
-		writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
-	} else {
-		writel(1 << (cohc->id - 32), virtbase +
-		       COH901318_BE_INT_CLEAR2);
-		writel(1 << (cohc->id - 32), virtbase +
-		       COH901318_TC_INT_CLEAR2);
-	}
-
-	enable_powersave(cohc);
-
-	while ((cohd = coh901318_first_active_get(cohc))) {
-		/* release the lli allocation*/
-		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
-		/* return desc to free-list */
-		coh901318_desc_remove(cohd);
-		coh901318_desc_free(cohc, cohd);
-	}
-
-	while ((cohd = coh901318_first_queued(cohc))) {
-		/* release the lli allocation*/
-		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
-		/* return desc to free-list */
-		coh901318_desc_remove(cohd);
-		coh901318_desc_free(cohc, cohd);
-	}
-
-
-	cohc->nbr_active_done = 0;
-	cohc->busy = 0;
-
-	spin_unlock_irqrestore(&cohc->lock, flags);
 
 	return 0;
 }
 
-void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
-			 struct coh901318_base *base)
+static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
+				struct coh901318_base *base)
 {
 	int chans_i;
 	int i = 0;
@@ -2717,7 +2698,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
 	base->dma_slave.device_tx_status = coh901318_tx_status;
 	base->dma_slave.device_issue_pending = coh901318_issue_pending;
-	base->dma_slave.device_control = coh901318_control;
+	base->dma_slave.device_config = coh901318_dma_set_runtimeconfig;
+	base->dma_slave.device_pause = coh901318_pause;
+	base->dma_slave.device_resume = coh901318_resume;
+	base->dma_slave.device_terminate_all = coh901318_terminate_all;
 	base->dma_slave.dev = &pdev->dev;
 
 	err = dma_async_device_register(&base->dma_slave);
@@ -2737,7 +2721,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
 	base->dma_memcpy.device_tx_status = coh901318_tx_status;
 	base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
-	base->dma_memcpy.device_control = coh901318_control;
+	base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig;
+	base->dma_memcpy.device_pause = coh901318_pause;
+	base->dma_memcpy.device_resume = coh901318_resume;
+	base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
 	base->dma_memcpy.dev = &pdev->dev;
 	/*
 	 * This controller can only access address at even 32bit boundaries,

+ 1 - 29
drivers/dma/cppi41.c

@@ -525,12 +525,6 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
 	return &c->txd;
 }
 
-static int cpp41_cfg_chan(struct cppi41_channel *c,
-		struct dma_slave_config *cfg)
-{
-	return 0;
-}
-
 static void cppi41_compute_td_desc(struct cppi41_desc *d)
 {
 	d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
@@ -647,28 +641,6 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 	return 0;
 }
 
-static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct cppi41_channel *c = to_cpp41_chan(chan);
-	int ret;
-
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
-		break;
-
-	case DMA_TERMINATE_ALL:
-		ret = cppi41_stop_chan(chan);
-		break;
-
-	default:
-		ret = -ENXIO;
-		break;
-	}
-	return ret;
-}
-
 static void cleanup_chans(struct cppi41_dd *cdd)
 {
 	while (!list_empty(&cdd->ddev.channels)) {
@@ -953,7 +925,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	cdd->ddev.device_tx_status = cppi41_dma_tx_status;
 	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
 	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
-	cdd->ddev.device_control = cppi41_dma_control;
+	cdd->ddev.device_terminate_all = cppi41_stop_chan;
 	cdd->ddev.dev = dev;
 	INIT_LIST_HEAD(&cdd->ddev.channels);
 	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;

+ 3 - 17
drivers/dma/dma-jz4740.c

@@ -210,7 +210,7 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
 }
 
 static int jz4740_dma_slave_config(struct dma_chan *c,
-	const struct dma_slave_config *config)
+				   struct dma_slave_config *config)
 {
 	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
 	struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -290,21 +290,6 @@ static int jz4740_dma_terminate_all(struct dma_chan *c)
 	return 0;
 }
 
-static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct dma_slave_config *config = (struct dma_slave_config *)arg;
-
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		return jz4740_dma_slave_config(chan, config);
-	case DMA_TERMINATE_ALL:
-		return jz4740_dma_terminate_all(chan);
-	default:
-		return -ENOSYS;
-	}
-}
-
 static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
 {
 	struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -561,7 +546,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 	dd->device_issue_pending = jz4740_dma_issue_pending;
 	dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
 	dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
-	dd->device_control = jz4740_dma_control;
+	dd->device_config = jz4740_dma_slave_config;
+	dd->device_terminate_all = jz4740_dma_terminate_all;
 	dd->dev = &pdev->dev;
 	INIT_LIST_HEAD(&dd->channels);
 

+ 62 - 22
drivers/dma/dmaengine.c

@@ -222,31 +222,35 @@ static void balance_ref_count(struct dma_chan *chan)
  */
 static int dma_chan_get(struct dma_chan *chan)
 {
-	int err = -ENODEV;
 	struct module *owner = dma_chan_to_owner(chan);
+	int ret;
 
+	/* The channel is already in use, update client count */
 	if (chan->client_count) {
 		__module_get(owner);
-		err = 0;
-	} else if (try_module_get(owner))
-		err = 0;
+		goto out;
+	}
 
-	if (err == 0)
-		chan->client_count++;
+	if (!try_module_get(owner))
+		return -ENODEV;
 
 	/* allocate upon first client reference */
-	if (chan->client_count == 1 && err == 0) {
-		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
-
-		if (desc_cnt < 0) {
-			err = desc_cnt;
-			chan->client_count = 0;
-			module_put(owner);
-		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
-			balance_ref_count(chan);
+	if (chan->device->device_alloc_chan_resources) {
+		ret = chan->device->device_alloc_chan_resources(chan);
+		if (ret < 0)
+			goto err_out;
 	}
 
-	return err;
+	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+		balance_ref_count(chan);
+
+out:
+	chan->client_count++;
+	return 0;
+
+err_out:
+	module_put(owner);
+	return ret;
 }
 
 /**
@@ -257,11 +261,15 @@ static int dma_chan_get(struct dma_chan *chan)
  */
 static void dma_chan_put(struct dma_chan *chan)
 {
+	/* This channel is not in use, bail out */
 	if (!chan->client_count)
-		return; /* this channel failed alloc_chan_resources */
+		return;
+
 	chan->client_count--;
 	module_put(dma_chan_to_owner(chan));
-	if (chan->client_count == 0)
+
+	/* This channel is not in use anymore, free it */
+	if (!chan->client_count && chan->device->device_free_chan_resources)
 		chan->device->device_free_chan_resources(chan);
 }
 
@@ -471,6 +479,39 @@ static void dma_channel_rebalance(void)
 		}
 }
 
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+	struct dma_device *device;
+
+	if (!chan || !caps)
+		return -EINVAL;
+
+	device = chan->device;
+
+	/* check if the channel supports slave transactions */
+	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
+		return -ENXIO;
+
+	/*
+	 * Check whether it reports it uses the generic slave
+	 * capabilities, if not, that means it doesn't support any
+	 * kind of slave capabilities reporting.
+	 */
+	if (!device->directions)
+		return -ENXIO;
+
+	caps->src_addr_widths = device->src_addr_widths;
+	caps->dst_addr_widths = device->dst_addr_widths;
+	caps->directions = device->directions;
+	caps->residue_granularity = device->residue_granularity;
+
+	caps->cmd_pause = !!device->device_pause;
+	caps->cmd_terminate = !!device->device_terminate_all;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_caps);
+
 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 					  struct dma_device *dev,
 					  dma_filter_fn fn, void *fn_param)
@@ -811,17 +852,16 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_sg);
 	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 		!device->device_prep_dma_cyclic);
-	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-		!device->device_control);
 	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
 		!device->device_prep_interleaved_dma);
 
-	BUG_ON(!device->device_alloc_chan_resources);
-	BUG_ON(!device->device_free_chan_resources);
 	BUG_ON(!device->device_tx_status);
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
 
+	WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions,
+	     "this driver doesn't support generic slave capabilities reporting\n");
+
 	/* note: this only matters in the
 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 	 */

+ 18 - 17
drivers/dma/dmatest.c

@@ -349,14 +349,14 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
 		       unsigned long data)
 {
 	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
-		   current->comm, n, err, src_off, dst_off, len, data);
+		 current->comm, n, err, src_off, dst_off, len, data);
 }
 
-#define verbose_result(err, n, src_off, dst_off, len, data) ({ \
-	if (verbose) \
-		result(err, n, src_off, dst_off, len, data); \
-	else \
-		dbg_result(err, n, src_off, dst_off, len, data); \
+#define verbose_result(err, n, src_off, dst_off, len, data) ({	\
+	if (verbose)						\
+		result(err, n, src_off, dst_off, len, data);	\
+	else							\
+		dbg_result(err, n, src_off, dst_off, len, data);\
 })
 
 static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
@@ -405,7 +405,6 @@ static int dmatest_func(void *data)
 	struct dmatest_params	*params;
 	struct dma_chan		*chan;
 	struct dma_device	*dev;
-	unsigned int		src_off, dst_off, len;
 	unsigned int		error_count;
 	unsigned int		failed_tests = 0;
 	unsigned int		total_tests = 0;
@@ -484,6 +483,7 @@ static int dmatest_func(void *data)
 		struct dmaengine_unmap_data *um;
 		dma_addr_t srcs[src_cnt];
 		dma_addr_t *dsts;
+		unsigned int src_off, dst_off, len;
 		u8 align = 0;
 
 		total_tests++;
@@ -502,15 +502,21 @@ static int dmatest_func(void *data)
 			break;
 		}
 
-		if (params->noverify) {
+		if (params->noverify)
 			len = params->buf_size;
+		else
+			len = dmatest_random() % params->buf_size + 1;
+
+		len = (len >> align) << align;
+		if (!len)
+			len = 1 << align;
+
+		total_len += len;
+
+		if (params->noverify) {
 			src_off = 0;
 			dst_off = 0;
 		} else {
-			len = dmatest_random() % params->buf_size + 1;
-			len = (len >> align) << align;
-			if (!len)
-				len = 1 << align;
 			src_off = dmatest_random() % (params->buf_size - len + 1);
 			dst_off = dmatest_random() % (params->buf_size - len + 1);
 
@@ -523,11 +529,6 @@ static int dmatest_func(void *data)
 					  params->buf_size);
 		}
 
-		len = (len >> align) << align;
-		if (!len)
-			len = 1 << align;
-		total_len += len;
-
 		um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
 					      GFP_KERNEL);
 		if (!um) {

+ 61 - 40
drivers/dma/dw/core.c

@@ -61,6 +61,13 @@
  */
 #define NR_DESCS_PER_CHANNEL	64
 
+/* The set of bus widths supported by the DMA controller */
+#define DW_DMA_BUSWIDTHS			  \
+	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
 /*----------------------------------------------------------------------*/
 
 static struct device *chan2dev(struct dma_chan *chan)
@@ -955,8 +962,7 @@ static inline void convert_burst(u32 *maxburst)
 		*maxburst = 0;
 }
 
-static int
-set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 
@@ -973,16 +979,25 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 	return 0;
 }
 
-static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+static int dwc_pause(struct dma_chan *chan)
 {
-	u32 cfglo = channel_readl(dwc, CFG_LO);
-	unsigned int count = 20;	/* timeout iterations */
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	unsigned long		flags;
+	unsigned int		count = 20;	/* timeout iterations */
+	u32			cfglo;
+
+	spin_lock_irqsave(&dwc->lock, flags);
 
+	cfglo = channel_readl(dwc, CFG_LO);
 	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
 	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
 		udelay(2);
 
 	dwc->paused = true;
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
 }
 
 static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
@@ -994,53 +1009,48 @@ static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
 	dwc->paused = false;
 }
 
-static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		       unsigned long arg)
+static int dwc_resume(struct dma_chan *chan)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma		*dw = to_dw_dma(chan->device);
-	struct dw_desc		*desc, *_desc;
 	unsigned long		flags;
-	LIST_HEAD(list);
 
-	if (cmd == DMA_PAUSE) {
-		spin_lock_irqsave(&dwc->lock, flags);
+	if (!dwc->paused)
+		return 0;
 
-		dwc_chan_pause(dwc);
+	spin_lock_irqsave(&dwc->lock, flags);
 
-		spin_unlock_irqrestore(&dwc->lock, flags);
-	} else if (cmd == DMA_RESUME) {
-		if (!dwc->paused)
-			return 0;
+	dwc_chan_resume(dwc);
 
-		spin_lock_irqsave(&dwc->lock, flags);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
-		dwc_chan_resume(dwc);
+	return 0;
+}
 
-		spin_unlock_irqrestore(&dwc->lock, flags);
-	} else if (cmd == DMA_TERMINATE_ALL) {
-		spin_lock_irqsave(&dwc->lock, flags);
+static int dwc_terminate_all(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
+	struct dw_desc		*desc, *_desc;
+	unsigned long		flags;
+	LIST_HEAD(list);
 
-		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+	spin_lock_irqsave(&dwc->lock, flags);
 
-		dwc_chan_disable(dw, dwc);
+	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+
+	dwc_chan_disable(dw, dwc);
 
-		dwc_chan_resume(dwc);
+	dwc_chan_resume(dwc);
 
-		/* active_list entries will end up before queued entries */
-		list_splice_init(&dwc->queue, &list);
-		list_splice_init(&dwc->active_list, &list);
+	/* active_list entries will end up before queued entries */
+	list_splice_init(&dwc->queue, &list);
+	list_splice_init(&dwc->active_list, &list);
 
-		spin_unlock_irqrestore(&dwc->lock, flags);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
-		/* Flush all pending and queued descriptors */
-		list_for_each_entry_safe(desc, _desc, &list, desc_node)
-			dwc_descriptor_complete(dwc, desc, false);
-	} else if (cmd == DMA_SLAVE_CONFIG) {
-		return set_runtime_config(chan, (struct dma_slave_config *)arg);
-	} else {
-		return -ENXIO;
-	}
+	/* Flush all pending and queued descriptors */
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		dwc_descriptor_complete(dwc, desc, false);
 
 	return 0;
 }
@@ -1551,7 +1561,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 		}
 	} else {
 		dw->nr_masters = pdata->nr_masters;
-		memcpy(dw->data_width, pdata->data_width, 4);
+		for (i = 0; i < dw->nr_masters; i++)
+			dw->data_width[i] = pdata->data_width[i];
 	}
 
 	/* Calculate all channel mask before DMA setup */
@@ -1656,13 +1667,23 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	dw->dma.device_free_chan_resources = dwc_free_chan_resources;
 
 	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
-
 	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
-	dw->dma.device_control = dwc_control;
+
+	dw->dma.device_config = dwc_config;
+	dw->dma.device_pause = dwc_pause;
+	dw->dma.device_resume = dwc_resume;
+	dw->dma.device_terminate_all = dwc_terminate_all;
 
 	dw->dma.device_tx_status = dwc_tx_status;
 	dw->dma.device_issue_pending = dwc_issue_pending;
 
+	/* DMA capabilities */
+	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
+	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
+	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
+			     BIT(DMA_MEM_TO_MEM);
+	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
 	err = dma_async_device_register(&dw->dma);
 	if (err)
 		goto err_dma_register;

+ 2 - 2
drivers/dma/dw/platform.c

@@ -100,7 +100,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct dw_dma_platform_data *pdata;
-	u32 tmp, arr[4];
+	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
 
 	if (!np) {
 		dev_err(&pdev->dev, "Missing DT data\n");
@@ -127,7 +127,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
 		pdata->block_size = tmp;
 
 	if (!of_property_read_u32(np, "dma-masters", &tmp)) {
-		if (tmp > 4)
+		if (tmp > DW_DMA_MAX_NR_MASTERS)
 			return NULL;
 
 		pdata->nr_masters = tmp;

+ 2 - 2
drivers/dma/dw/regs.h

@@ -252,7 +252,7 @@ struct dw_dma_chan {
 	u8			src_master;
 	u8			dst_master;
 
-	/* configuration passed via DMA_SLAVE_CONFIG */
+	/* configuration passed via .device_config */
 	struct dma_slave_config dma_sconfig;
 };
 
@@ -285,7 +285,7 @@ struct dw_dma {
 
 	/* hardware configuration */
 	unsigned char		nr_masters;
-	unsigned char		data_width[4];
+	unsigned char		data_width[DW_DMA_MAX_NR_MASTERS];
 };
 
 static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)

+ 23 - 50
drivers/dma/edma.c

@@ -15,6 +15,7 @@
 
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/edma.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -244,8 +245,9 @@ static void edma_execute(struct edma_chan *echan)
 	}
 }
 
-static int edma_terminate_all(struct edma_chan *echan)
+static int edma_terminate_all(struct dma_chan *chan)
 {
+	struct edma_chan *echan = to_edma_chan(chan);
 	unsigned long flags;
 	LIST_HEAD(head);
 
@@ -273,9 +275,11 @@ static int edma_terminate_all(struct edma_chan *echan)
 	return 0;
 }
 
-static int edma_slave_config(struct edma_chan *echan,
+static int edma_slave_config(struct dma_chan *chan,
 	struct dma_slave_config *cfg)
 {
+	struct edma_chan *echan = to_edma_chan(chan);
+
 	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
 	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 		return -EINVAL;
@@ -285,8 +289,10 @@ static int edma_slave_config(struct edma_chan *echan,
 	return 0;
 }
 
-static int edma_dma_pause(struct edma_chan *echan)
+static int edma_dma_pause(struct dma_chan *chan)
 {
+	struct edma_chan *echan = to_edma_chan(chan);
+
 	/* Pause/Resume only allowed with cyclic mode */
 	if (!echan->edesc || !echan->edesc->cyclic)
 		return -EINVAL;
@@ -295,8 +301,10 @@ static int edma_dma_pause(struct edma_chan *echan)
 	return 0;
 }
 
-static int edma_dma_resume(struct edma_chan *echan)
+static int edma_dma_resume(struct dma_chan *chan)
 {
+	struct edma_chan *echan = to_edma_chan(chan);
+
 	/* Pause/Resume only allowed with cyclic mode */
 	if (!echan->edesc->cyclic)
 		return -EINVAL;
@@ -305,36 +313,6 @@ static int edma_dma_resume(struct edma_chan *echan)
 	return 0;
 }
 
-static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			unsigned long arg)
-{
-	int ret = 0;
-	struct dma_slave_config *config;
-	struct edma_chan *echan = to_edma_chan(chan);
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		edma_terminate_all(echan);
-		break;
-	case DMA_SLAVE_CONFIG:
-		config = (struct dma_slave_config *)arg;
-		ret = edma_slave_config(echan, config);
-		break;
-	case DMA_PAUSE:
-		ret = edma_dma_pause(echan);
-		break;
-
-	case DMA_RESUME:
-		ret = edma_dma_resume(echan);
-		break;
-
-	default:
-		ret = -ENOSYS;
-	}
-
-	return ret;
-}
-
 /*
  * A PaRAM set configuration abstraction used by other modes
  * @chan: Channel who's PaRAM set we're configuring
@@ -557,7 +535,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
 
-struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	size_t len, unsigned long tx_flags)
 {
@@ -994,19 +972,6 @@ static void __init edma_chan_init(struct edma_cc *ecc,
 				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
-static int edma_dma_device_slave_caps(struct dma_chan *dchan,
-				      struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-	return 0;
-}
-
 static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
 			  struct device *dev)
 {
@@ -1017,8 +982,16 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
 	dma->device_free_chan_resources = edma_free_chan_resources;
 	dma->device_issue_pending = edma_issue_pending;
 	dma->device_tx_status = edma_tx_status;
-	dma->device_control = edma_control;
-	dma->device_slave_caps = edma_dma_device_slave_caps;
+	dma->device_config = edma_slave_config;
+	dma->device_pause = edma_dma_pause;
+	dma->device_resume = edma_dma_resume;
+	dma->device_terminate_all = edma_terminate_all;
+
+	dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+	dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
+	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
 	dma->dev = dev;
 
 	/*

+ 8 - 35
drivers/dma/ep93xx_dma.c

@@ -144,7 +144,7 @@ struct ep93xx_dma_desc {
  * @queue: pending descriptors which are handled next
  * @free_list: list of free descriptors which can be used
  * @runtime_addr: physical address currently used as dest/src (M2M only). This
- *                is set via %DMA_SLAVE_CONFIG before slave operation is
+ *                is set via .device_config before slave operation is
  *                prepared
  * @runtime_ctrl: M2M runtime values for the control register.
  *
@@ -1164,13 +1164,14 @@ fail:
 
 /**
  * ep93xx_dma_terminate_all - terminate all transactions
- * @edmac: channel
+ * @chan: channel
  *
  * Stops all DMA transactions. All descriptors are put back to the
  * @edmac->free_list and callbacks are _not_ called.
  */
-static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
+static int ep93xx_dma_terminate_all(struct dma_chan *chan)
 {
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 	struct ep93xx_dma_desc *desc, *_d;
 	unsigned long flags;
 	LIST_HEAD(list);
@@ -1194,9 +1195,10 @@ static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
 	return 0;
 }
 
-static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
+static int ep93xx_dma_slave_config(struct dma_chan *chan,
 				   struct dma_slave_config *config)
 {
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 	enum dma_slave_buswidth width;
 	unsigned long flags;
 	u32 addr, ctrl;
@@ -1241,36 +1243,6 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
 	return 0;
 }
 
-/**
- * ep93xx_dma_control - manipulate all pending operations on a channel
- * @chan: channel
- * @cmd: control command to perform
- * @arg: optional argument
- *
- * Controls the channel. Function returns %0 in case of success or negative
- * error in case of failure.
- */
-static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			      unsigned long arg)
-{
-	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
-	struct dma_slave_config *config;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		return ep93xx_dma_terminate_all(edmac);
-
-	case DMA_SLAVE_CONFIG:
-		config = (struct dma_slave_config *)arg;
-		return ep93xx_dma_slave_config(edmac, config);
-
-	default:
-		break;
-	}
-
-	return -ENOSYS;
-}
-
 /**
  * ep93xx_dma_tx_status - check if a transaction is completed
  * @chan: channel
@@ -1352,7 +1324,8 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
 	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
 	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
 	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
-	dma_dev->device_control = ep93xx_dma_control;
+	dma_dev->device_config = ep93xx_dma_slave_config;
+	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
 	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
 	dma_dev->device_tx_status = ep93xx_dma_tx_status;
 

+ 62 - 61
drivers/dma/fsl-edma.c

@@ -289,62 +289,69 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
 	kfree(fsl_desc);
 }
 
-static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int fsl_edma_terminate_all(struct dma_chan *chan)
 {
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	struct dma_slave_config *cfg = (void *)arg;
 	unsigned long flags;
 	LIST_HEAD(head);
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	fsl_edma_disable_request(fsl_chan);
+	fsl_chan->edesc = NULL;
+	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+	return 0;
+}
+
+static int fsl_edma_pause(struct dma_chan *chan)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	if (fsl_chan->edesc) {
 		fsl_edma_disable_request(fsl_chan);
-		fsl_chan->edesc = NULL;
-		vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-		vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
-		return 0;
-
-	case DMA_SLAVE_CONFIG:
-		fsl_chan->fsc.dir = cfg->direction;
-		if (cfg->direction == DMA_DEV_TO_MEM) {
-			fsl_chan->fsc.dev_addr = cfg->src_addr;
-			fsl_chan->fsc.addr_width = cfg->src_addr_width;
-			fsl_chan->fsc.burst = cfg->src_maxburst;
-			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
-		} else if (cfg->direction == DMA_MEM_TO_DEV) {
-			fsl_chan->fsc.dev_addr = cfg->dst_addr;
-			fsl_chan->fsc.addr_width = cfg->dst_addr_width;
-			fsl_chan->fsc.burst = cfg->dst_maxburst;
-			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
-		} else {
-			return -EINVAL;
-		}
-		return 0;
+		fsl_chan->status = DMA_PAUSED;
+	}
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+	return 0;
+}
 
-	case DMA_PAUSE:
-		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-		if (fsl_chan->edesc) {
-			fsl_edma_disable_request(fsl_chan);
-			fsl_chan->status = DMA_PAUSED;
-		}
-		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-		return 0;
-
-	case DMA_RESUME:
-		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-		if (fsl_chan->edesc) {
-			fsl_edma_enable_request(fsl_chan);
-			fsl_chan->status = DMA_IN_PROGRESS;
-		}
-		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-		return 0;
+static int fsl_edma_resume(struct dma_chan *chan)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	unsigned long flags;
 
-	default:
-		return -ENXIO;
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	if (fsl_chan->edesc) {
+		fsl_edma_enable_request(fsl_chan);
+		fsl_chan->status = DMA_IN_PROGRESS;
+	}
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+	return 0;
+}
+
+static int fsl_edma_slave_config(struct dma_chan *chan,
+				 struct dma_slave_config *cfg)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+	fsl_chan->fsc.dir = cfg->direction;
+	if (cfg->direction == DMA_DEV_TO_MEM) {
+		fsl_chan->fsc.dev_addr = cfg->src_addr;
+		fsl_chan->fsc.addr_width = cfg->src_addr_width;
+		fsl_chan->fsc.burst = cfg->src_maxburst;
+		fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
+	} else if (cfg->direction == DMA_MEM_TO_DEV) {
+		fsl_chan->fsc.dev_addr = cfg->dst_addr;
+		fsl_chan->fsc.addr_width = cfg->dst_addr_width;
+		fsl_chan->fsc.burst = cfg->dst_maxburst;
+		fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
+	} else {
+			return -EINVAL;
 	}
+	return 0;
 }
 
 static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
@@ -780,18 +787,6 @@ static void fsl_edma_free_chan_resources(struct dma_chan *chan)
 	fsl_chan->tcd_pool = NULL;
 }
 
-static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
-		struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
-	caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-
-	return 0;
-}
-
 static int
 fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
 {
@@ -917,9 +912,15 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
 	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
 	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
-	fsl_edma->dma_dev.device_control = fsl_edma_control;
+	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
+	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
+	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
+	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
 	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
-	fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;
+
+	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
+	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+	fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 
 	platform_set_drvdata(pdev, fsl_edma);
 

+ 37 - 60
drivers/dma/fsldma.c

@@ -941,84 +941,56 @@ fail:
 	return NULL;
 }
 
-/**
- * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
- * @direction: DMA direction
- * @flags: DMAEngine flags
- * @context: transaction context (ignored)
- *
- * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
- * DMA_SLAVE API, this gets the device-specific information from the
- * chan->private variable.
- */
-static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
-	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
-	enum dma_transfer_direction direction, unsigned long flags,
-	void *context)
+static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
 {
-	/*
-	 * This operation is not supported on the Freescale DMA controller
-	 *
-	 * However, we need to provide the function pointer to allow the
-	 * device_control() method to work.
-	 */
-	return NULL;
-}
-
-static int fsl_dma_device_control(struct dma_chan *dchan,
-				  enum dma_ctrl_cmd cmd, unsigned long arg)
-{
-	struct dma_slave_config *config;
 	struct fsldma_chan *chan;
 	struct fsldma_chan *chan;
-	int size;
 
 
 	if (!dchan)
 		return -EINVAL;
 
 	chan = to_fsl_chan(dchan);
 
-	case DMA_TERMINATE_ALL:
-		spin_lock_bh(&chan->desc_lock);
-
-		/* Halt the DMA engine */
-		dma_halt(chan);
+	spin_lock_bh(&chan->desc_lock);
 
-		/* Remove and free all of the descriptors in the LD queue */
-		fsldma_free_desc_list(chan, &chan->ld_pending);
-		fsldma_free_desc_list(chan, &chan->ld_running);
-		fsldma_free_desc_list(chan, &chan->ld_completed);
-		chan->idle = true;
+	/* Halt the DMA engine */
+	dma_halt(chan);
 
-		spin_unlock_bh(&chan->desc_lock);
-		return 0;
+	/* Remove and free all of the descriptors in the LD queue */
+	fsldma_free_desc_list(chan, &chan->ld_pending);
+	fsldma_free_desc_list(chan, &chan->ld_running);
+	fsldma_free_desc_list(chan, &chan->ld_completed);
+	chan->idle = true;
 
-	case DMA_SLAVE_CONFIG:
-		config = (struct dma_slave_config *)arg;
+	spin_unlock_bh(&chan->desc_lock);
+	return 0;
+}
 
-		/* make sure the channel supports setting burst size */
-		if (!chan->set_request_count)
-			return -ENXIO;
+static int fsl_dma_device_config(struct dma_chan *dchan,
+				 struct dma_slave_config *config)
+{
+	struct fsldma_chan *chan;
+	int size;
 
-		/* we set the controller burst size depending on direction */
-		if (config->direction == DMA_MEM_TO_DEV)
-			size = config->dst_addr_width * config->dst_maxburst;
-		else
-			size = config->src_addr_width * config->src_maxburst;
+	if (!dchan)
+		return -EINVAL;
 
-		chan->set_request_count(chan, size);
-		return 0;
+	chan = to_fsl_chan(dchan);
 
-	default:
+	/* make sure the channel supports setting burst size */
+	if (!chan->set_request_count)
 		return -ENXIO;
-	}
 
+	/* we set the controller burst size depending on direction */
+	if (config->direction == DMA_MEM_TO_DEV)
+		size = config->dst_addr_width * config->dst_maxburst;
+	else
+		size = config->src_addr_width * config->src_maxburst;
+
+	chan->set_request_count(chan, size);
 	return 0;
 }
 
+
 /**
  * fsl_dma_memcpy_issue_pending - Issue the DMA start command
  * @chan : Freescale DMA channel
@@ -1395,10 +1367,15 @@ static int fsldma_of_probe(struct platform_device *op)
 	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
 	fdev->common.device_tx_status = fsl_tx_status;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
-	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
-	fdev->common.device_control = fsl_dma_device_control;
+	fdev->common.device_config = fsl_dma_device_config;
+	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
 	fdev->common.dev = &op->dev;
 
+	fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
+	fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
+	fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
 	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
 
 	platform_set_drvdata(op, fdev);

+ 4 - 0
drivers/dma/fsldma.h

@@ -83,6 +83,10 @@
 #define FSL_DMA_DGSR_EOSI	0x02
 #define FSL_DMA_DGSR_EOLSI	0x01
 
+#define FSL_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+				BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
 typedef u64 __bitwise v64;
 typedef u32 __bitwise v32;
 

+ 1011 - 0
drivers/dma/img-mdc-dma.c

@@ -0,0 +1,1011 @@
+/*
+ * IMG Multi-threaded DMA Controller (MDC)
+ *
+ * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define MDC_MAX_DMA_CHANNELS			32
+
+#define MDC_GENERAL_CONFIG			0x000
+#define MDC_GENERAL_CONFIG_LIST_IEN		BIT(31)
+#define MDC_GENERAL_CONFIG_IEN			BIT(29)
+#define MDC_GENERAL_CONFIG_LEVEL_INT		BIT(28)
+#define MDC_GENERAL_CONFIG_INC_W		BIT(12)
+#define MDC_GENERAL_CONFIG_INC_R		BIT(8)
+#define MDC_GENERAL_CONFIG_PHYSICAL_W		BIT(7)
+#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT	4
+#define MDC_GENERAL_CONFIG_WIDTH_W_MASK		0x7
+#define MDC_GENERAL_CONFIG_PHYSICAL_R		BIT(3)
+#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT	0
+#define MDC_GENERAL_CONFIG_WIDTH_R_MASK		0x7
+
+#define MDC_READ_PORT_CONFIG			0x004
+#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT	28
+#define MDC_READ_PORT_CONFIG_STHREAD_MASK	0xf
+#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT	24
+#define MDC_READ_PORT_CONFIG_RTHREAD_MASK	0xf
+#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT	16
+#define MDC_READ_PORT_CONFIG_WTHREAD_MASK	0xf
+#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT	4
+#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK	0xff
+#define MDC_READ_PORT_CONFIG_DREQ_ENABLE	BIT(1)
+
+#define MDC_READ_ADDRESS			0x008
+
+#define MDC_WRITE_ADDRESS			0x00c
+
+#define MDC_TRANSFER_SIZE			0x010
+#define MDC_TRANSFER_SIZE_MASK			0xffffff
+
+#define MDC_LIST_NODE_ADDRESS			0x014
+
+#define MDC_CMDS_PROCESSED			0x018
+#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT	16
+#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK	0x3f
+#define MDC_CMDS_PROCESSED_INT_ACTIVE		BIT(8)
+#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT	0
+#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK	0x3f
+
+#define MDC_CONTROL_AND_STATUS			0x01c
+#define MDC_CONTROL_AND_STATUS_CANCEL		BIT(20)
+#define MDC_CONTROL_AND_STATUS_LIST_EN		BIT(4)
+#define MDC_CONTROL_AND_STATUS_EN		BIT(0)
+
+#define MDC_ACTIVE_TRANSFER_SIZE		0x030
+
+#define MDC_GLOBAL_CONFIG_A				0x900
+#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT	16
+#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK	0xff
+#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT		8
+#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK		0xff
+#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT		0
+#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK		0xff
+
+struct mdc_hw_list_desc {
+	u32 gen_conf;
+	u32 readport_conf;
+	u32 read_addr;
+	u32 write_addr;
+	u32 xfer_size;
+	u32 node_addr;
+	u32 cmds_done;
+	u32 ctrl_status;
+	/*
+	 * Not part of the list descriptor, but instead used by the CPU to
+	 * traverse the list.
+	 */
+	struct mdc_hw_list_desc *next_desc;
+};
+
+struct mdc_tx_desc {
+	struct mdc_chan *chan;
+	struct virt_dma_desc vd;
+	dma_addr_t list_phys;
+	struct mdc_hw_list_desc *list;
+	bool cyclic;
+	bool cmd_loaded;
+	unsigned int list_len;
+	unsigned int list_period_len;
+	size_t list_xfer_size;
+	unsigned int list_cmds_done;
+};
+
+struct mdc_chan {
+	struct mdc_dma *mdma;
+	struct virt_dma_chan vc;
+	struct dma_slave_config config;
+	struct mdc_tx_desc *desc;
+	int irq;
+	unsigned int periph;
+	unsigned int thread;
+	unsigned int chan_nr;
+};
+
+struct mdc_dma_soc_data {
+	void (*enable_chan)(struct mdc_chan *mchan);
+	void (*disable_chan)(struct mdc_chan *mchan);
+};
+
+struct mdc_dma {
+	struct dma_device dma_dev;
+	void __iomem *regs;
+	struct clk *clk;
+	struct dma_pool *desc_pool;
+	struct regmap *periph_regs;
+	spinlock_t lock;
+	unsigned int nr_threads;
+	unsigned int nr_channels;
+	unsigned int bus_width;
+	unsigned int max_burst_mult;
+	unsigned int max_xfer_size;
+	const struct mdc_dma_soc_data *soc;
+	struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
+};
+
+static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
+{
+	return readl(mdma->regs + reg);
+}
+
+static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
+{
+	writel(val, mdma->regs + reg);
+}
+
+static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
+{
+	return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
+}
+
+static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
+{
+	mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
+}
+
+static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
+{
+	return container_of(to_virt_chan(c), struct mdc_chan, vc);
+}
+
+static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
+{
+	struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);
+
+	return container_of(vdesc, struct mdc_tx_desc, vd);
+}
+
+static inline struct device *mdma2dev(struct mdc_dma *mdma)
+{
+	return mdma->dma_dev.dev;
+}
+
+static inline unsigned int to_mdc_width(unsigned int bytes)
+{
+	return ffs(bytes) - 1;
+}
+
+static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
+				      unsigned int bytes)
+{
+	ldesc->gen_conf |= to_mdc_width(bytes) <<
+		MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
+}
+
+static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
+				       unsigned int bytes)
+{
+	ldesc->gen_conf |= to_mdc_width(bytes) <<
+		MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
+}
+
+static void mdc_list_desc_config(struct mdc_chan *mchan,
+				 struct mdc_hw_list_desc *ldesc,
+				 enum dma_transfer_direction dir,
+				 dma_addr_t src, dma_addr_t dst, size_t len)
+{
+	struct mdc_dma *mdma = mchan->mdma;
+	unsigned int max_burst, burst_size;
+
+	ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
+		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
+		MDC_GENERAL_CONFIG_PHYSICAL_R;
+	ldesc->readport_conf =
+		(mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
+		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
+		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
+	ldesc->read_addr = src;
+	ldesc->write_addr = dst;
+	ldesc->xfer_size = len - 1;
+	ldesc->node_addr = 0;
+	ldesc->cmds_done = 0;
+	ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
+		MDC_CONTROL_AND_STATUS_EN;
+	ldesc->next_desc = NULL;
+
+	if (IS_ALIGNED(dst, mdma->bus_width) &&
+	    IS_ALIGNED(src, mdma->bus_width))
+		max_burst = mdma->bus_width * mdma->max_burst_mult;
+	else
+		max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);
+
+	if (dir == DMA_MEM_TO_DEV) {
+		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
+		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
+		mdc_set_read_width(ldesc, mdma->bus_width);
+		mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
+		burst_size = min(max_burst, mchan->config.dst_maxburst *
+				 mchan->config.dst_addr_width);
+	} else if (dir == DMA_DEV_TO_MEM) {
+		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
+		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
+		mdc_set_read_width(ldesc, mchan->config.src_addr_width);
+		mdc_set_write_width(ldesc, mdma->bus_width);
+		burst_size = min(max_burst, mchan->config.src_maxburst *
+				 mchan->config.src_addr_width);
+	} else {
+		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
+			MDC_GENERAL_CONFIG_INC_W;
+		mdc_set_read_width(ldesc, mdma->bus_width);
+		mdc_set_write_width(ldesc, mdma->bus_width);
+		burst_size = max_burst;
+	}
+	ldesc->readport_conf |= (burst_size - 1) <<
+		MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
+}
+
+static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
+{
+	struct mdc_dma *mdma = mdesc->chan->mdma;
+	struct mdc_hw_list_desc *curr, *next;
+	dma_addr_t curr_phys, next_phys;
+
+	curr = mdesc->list;
+	curr_phys = mdesc->list_phys;
+	while (curr) {
+		next = curr->next_desc;
+		next_phys = curr->node_addr;
+		dma_pool_free(mdma->desc_pool, curr, curr_phys);
+		curr = next;
+		curr_phys = next_phys;
+	}
+}
+
+static void mdc_desc_free(struct virt_dma_desc *vd)
+{
+	struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);
+
+	mdc_list_desc_free(mdesc);
+	kfree(mdesc);
+}
+
+static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
+	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
+	unsigned long flags)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct mdc_dma *mdma = mchan->mdma;
+	struct mdc_tx_desc *mdesc;
+	struct mdc_hw_list_desc *curr, *prev = NULL;
+	dma_addr_t curr_phys, prev_phys;
+
+	if (!len)
+		return NULL;
+
+	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
+	if (!mdesc)
+		return NULL;
+	mdesc->chan = mchan;
+	mdesc->list_xfer_size = len;
+
+	while (len > 0) {
+		size_t xfer_size;
+
+		curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
+		if (!curr)
+			goto free_desc;
+
+		if (prev) {
+			prev->node_addr = curr_phys;
+			prev->next_desc = curr;
+		} else {
+			mdesc->list_phys = curr_phys;
+			mdesc->list = curr;
+		}
+
+		xfer_size = min_t(size_t, mdma->max_xfer_size, len);
+
+		mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
+				     xfer_size);
+
+		prev = curr;
+		prev_phys = curr_phys;
+
+		mdesc->list_len++;
+		src += xfer_size;
+		dest += xfer_size;
+		len -= xfer_size;
+	}
+
+	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
+
+free_desc:
+	mdc_desc_free(&mdesc->vd);
+
+	return NULL;
+}
+
+static int mdc_check_slave_width(struct mdc_chan *mchan,
+				 enum dma_transfer_direction dir)
+{
+	enum dma_slave_buswidth width;
+
+	if (dir == DMA_MEM_TO_DEV)
+		width = mchan->config.dst_addr_width;
+	else
+		width = mchan->config.src_addr_width;
+
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (width > mchan->mdma->bus_width)
+		return -EINVAL;
+
+	return 0;
+}
+
+static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction dir,
+	unsigned long flags)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct mdc_dma *mdma = mchan->mdma;
+	struct mdc_tx_desc *mdesc;
+	struct mdc_hw_list_desc *curr, *prev = NULL;
+	dma_addr_t curr_phys, prev_phys;
+
+	if (!buf_len && !period_len)
+		return NULL;
+
+	if (!is_slave_direction(dir))
+		return NULL;
+
+	if (mdc_check_slave_width(mchan, dir) < 0)
+		return NULL;
+
+	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
+	if (!mdesc)
+		return NULL;
+	mdesc->chan = mchan;
+	mdesc->cyclic = true;
+	mdesc->list_xfer_size = buf_len;
+	mdesc->list_period_len = DIV_ROUND_UP(period_len,
+					      mdma->max_xfer_size);
+
+	while (buf_len > 0) {
+		size_t remainder = min(period_len, buf_len);
+
+		while (remainder > 0) {
+			size_t xfer_size;
+
+			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
+					      &curr_phys);
+			if (!curr)
+				goto free_desc;
+
+			if (!prev) {
+				mdesc->list_phys = curr_phys;
+				mdesc->list = curr;
+			} else {
+				prev->node_addr = curr_phys;
+				prev->next_desc = curr;
+			}
+
+			xfer_size = min_t(size_t, mdma->max_xfer_size,
+					  remainder);
+
+			if (dir == DMA_MEM_TO_DEV) {
+				mdc_list_desc_config(mchan, curr, dir,
+						     buf_addr,
+						     mchan->config.dst_addr,
+						     xfer_size);
+			} else {
+				mdc_list_desc_config(mchan, curr, dir,
+						     mchan->config.src_addr,
+						     buf_addr,
+						     xfer_size);
+			}
+
+			prev = curr;
+			prev_phys = curr_phys;
+
+			mdesc->list_len++;
+			buf_addr += xfer_size;
+			buf_len -= xfer_size;
+			remainder -= xfer_size;
+		}
+	}
+	prev->node_addr = mdesc->list_phys;
+
+	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
+
+free_desc:
+	mdc_desc_free(&mdesc->vd);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction dir,
+	unsigned long flags, void *context)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct mdc_dma *mdma = mchan->mdma;
+	struct mdc_tx_desc *mdesc;
+	struct scatterlist *sg;
+	struct mdc_hw_list_desc *curr, *prev = NULL;
+	dma_addr_t curr_phys, prev_phys;
+	unsigned int i;
+
+	if (!sgl)
+		return NULL;
+
+	if (!is_slave_direction(dir))
+		return NULL;
+
+	if (mdc_check_slave_width(mchan, dir) < 0)
+		return NULL;
+
+	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
+	if (!mdesc)
+		return NULL;
+	mdesc->chan = mchan;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		dma_addr_t buf = sg_dma_address(sg);
+		size_t buf_len = sg_dma_len(sg);
+
+		while (buf_len > 0) {
+			size_t xfer_size;
+
+			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
+					      &curr_phys);
+			if (!curr)
+				goto free_desc;
+
+			if (!prev) {
+				mdesc->list_phys = curr_phys;
+				mdesc->list = curr;
+			} else {
+				prev->node_addr = curr_phys;
+				prev->next_desc = curr;
+			}
+
+			xfer_size = min_t(size_t, mdma->max_xfer_size,
+					  buf_len);
+
+			if (dir == DMA_MEM_TO_DEV) {
+				mdc_list_desc_config(mchan, curr, dir, buf,
+						     mchan->config.dst_addr,
+						     xfer_size);
+			} else {
+				mdc_list_desc_config(mchan, curr, dir,
+						     mchan->config.src_addr,
+						     buf, xfer_size);
+			}
+
+			prev = curr;
+			prev_phys = curr_phys;
+
+			mdesc->list_len++;
+			mdesc->list_xfer_size += xfer_size;
+			buf += xfer_size;
+			buf_len -= xfer_size;
+		}
+	}
+
+	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
+
+free_desc:
+	mdc_desc_free(&mdesc->vd);
+
+	return NULL;
+}
+
+static void mdc_issue_desc(struct mdc_chan *mchan)
+{
+	struct mdc_dma *mdma = mchan->mdma;
+	struct virt_dma_desc *vd;
+	struct mdc_tx_desc *mdesc;
+	u32 val;
+
+	vd = vchan_next_desc(&mchan->vc);
+	if (!vd)
+		return;
+
+	list_del(&vd->node);
+
+	mdesc = to_mdc_desc(&vd->tx);
+	mchan->desc = mdesc;
+
+	dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
+		mchan->chan_nr);
+
+	mdma->soc->enable_chan(mchan);
+
+	val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
+	val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
+		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
+		MDC_GENERAL_CONFIG_PHYSICAL_R;
+	mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
+	val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
+		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
+		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
+	mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
+	mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
+	val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
+	val |= MDC_CONTROL_AND_STATUS_LIST_EN;
+	mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
+}
+
+static void mdc_issue_pending(struct dma_chan *chan)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mchan->vc.lock, flags);
+	if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
+		mdc_issue_desc(mchan);
+	spin_unlock_irqrestore(&mchan->vc.lock, flags);
+}
+
+static enum dma_status mdc_tx_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct mdc_tx_desc *mdesc;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	size_t bytes = 0;
+	int ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	if (!txstate)
+		return ret;
+
+	spin_lock_irqsave(&mchan->vc.lock, flags);
+	vd = vchan_find_desc(&mchan->vc, cookie);
+	if (vd) {
+		mdesc = to_mdc_desc(&vd->tx);
+		bytes = mdesc->list_xfer_size;
+	} else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
+		struct mdc_hw_list_desc *ldesc;
+		u32 val1, val2, done, processed, residue;
+		int i, cmds;
+
+		mdesc = mchan->desc;
+
+		/*
+		 * Determine the number of commands that haven't been
+		 * processed (handled by the IRQ handler) yet.
+		 */
+		do {
+			val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
+				~MDC_CMDS_PROCESSED_INT_ACTIVE;
+			residue = mdc_chan_readl(mchan,
+						 MDC_ACTIVE_TRANSFER_SIZE);
+			val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
+				~MDC_CMDS_PROCESSED_INT_ACTIVE;
+		} while (val1 != val2);
+
+		done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
+			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
+		processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
+			MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
+		cmds = (done - processed) %
+			(MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);
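+		/*
+		 * The done/processed counters are narrow wrapping fields, so
+		 * the difference is taken modulo the field range; e.g. with
+		 * 8-bit counters (an assumption for illustration), done = 2
+		 * and processed = 250 leaves 8 commands outstanding.
+		 */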
+
+		/*
+		 * If the command loaded event hasn't been processed yet, then
+		 * the difference above includes an extra command.
+		 */
+		if (!mdesc->cmd_loaded)
+			cmds--;
+		else
+			cmds += mdesc->list_cmds_done;
+
+		bytes = mdesc->list_xfer_size;
+		ldesc = mdesc->list;
+		for (i = 0; i < cmds; i++) {
+			bytes -= ldesc->xfer_size + 1;
+			ldesc = ldesc->next_desc;
+		}
+		if (ldesc) {
+			if (residue != MDC_TRANSFER_SIZE_MASK)
+				bytes -= ldesc->xfer_size - residue;
+			else
+				bytes -= ldesc->xfer_size + 1;
+		}
+	}
+	spin_unlock_irqrestore(&mchan->vc.lock, flags);
+
+	dma_set_residue(txstate, bytes);
+
+	return ret;
+}
+
+static int mdc_terminate_all(struct dma_chan *chan)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct mdc_tx_desc *mdesc;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&mchan->vc.lock, flags);
+
+	mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
+			MDC_CONTROL_AND_STATUS);
+
+	mdesc = mchan->desc;
+	mchan->desc = NULL;
+	vchan_get_all_descriptors(&mchan->vc, &head);
+
+	spin_unlock_irqrestore(&mchan->vc.lock, flags);
+
+	if (mdesc)
+		mdc_desc_free(&mdesc->vd);
+	vchan_dma_desc_free_list(&mchan->vc, &head);
+
+	return 0;
+}
+
+static int mdc_slave_config(struct dma_chan *chan,
+			    struct dma_slave_config *config)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mchan->vc.lock, flags);
+	mchan->config = *config;
+	spin_unlock_irqrestore(&mchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int mdc_alloc_chan_resources(struct dma_chan *chan)
+{
+	return 0;
+}
+
+static void mdc_free_chan_resources(struct dma_chan *chan)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct mdc_dma *mdma = mchan->mdma;
+
+	mdc_terminate_all(chan);
+
+	mdma->soc->disable_chan(mchan);
+}
+
+static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
+{
+	struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
+	struct mdc_tx_desc *mdesc;
+	u32 val, processed, done1, done2;
+	unsigned int i;
+
+	spin_lock(&mchan->vc.lock);
+
+	val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+	processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
+		MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
+	/*
+	 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
+	 * and clearing INT_ACTIVE.  Re-read CMDS_PROCESSED to ensure we
+	 * didn't miss a command completion.
+	 */
+	do {
+		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+		done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
+			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
+		val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
+			  MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
+			 MDC_CMDS_PROCESSED_INT_ACTIVE);
+		val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
+		mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
+		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+		done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
+			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
+	} while (done1 != done2);
+
+	dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);
+
+	mdesc = mchan->desc;
+	if (!mdesc) {
+		dev_warn(mdma2dev(mchan->mdma),
+			 "IRQ with no active descriptor on channel %d\n",
+			 mchan->chan_nr);
+		goto out;
+	}
+
+	for (i = processed; i != done1;
+	     i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
+		/*
+		 * The first interrupt in a transfer indicates that the
+		 * command list has been loaded, not that a command has
+		 * been completed.
+		 */
+		if (!mdesc->cmd_loaded) {
+			mdesc->cmd_loaded = true;
+			continue;
+		}
+
+		mdesc->list_cmds_done++;
+		if (mdesc->cyclic) {
+			mdesc->list_cmds_done %= mdesc->list_len;
+			if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
+				vchan_cyclic_callback(&mdesc->vd);
+		} else if (mdesc->list_cmds_done == mdesc->list_len) {
+			mchan->desc = NULL;
+			vchan_cookie_complete(&mdesc->vd);
+			mdc_issue_desc(mchan);
+			break;
+		}
+	}
+out:
+	spin_unlock(&mchan->vc.lock);
+
+	return IRQ_HANDLED;
+}
+
+static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
+				     struct of_dma *ofdma)
+{
+	struct mdc_dma *mdma = ofdma->of_dma_data;
+	struct dma_chan *chan;
+
+	if (dma_spec->args_count != 3)
+		return NULL;
+
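+	/*
+	 * The three DT cells are: args[0] = peripheral index to route to the
+	 * channel, args[1] = bitmask of channels usable by this peripheral,
+	 * args[2] = thread to use (see the img-mdc-dma binding document).
+	 */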
+	list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
+		struct mdc_chan *mchan = to_mdc_chan(chan);
+
+		if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
+			continue;
+		if (dma_get_slave_channel(chan)) {
+			mchan->periph = dma_spec->args[0];
+			mchan->thread = dma_spec->args[2];
+			return chan;
+		}
+	}
+
+	return NULL;
+}
+
+#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch)	(0x120 + 0x4 * ((ch) / 4))
+#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch) (8 * ((ch) % 4))
+#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK	0x3f
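+/*
+ * Each 32-bit CR_PERIPH route register packs four channels, one 6-bit field
+ * per byte lane; e.g. channel 5 is configured at offset 0x124, bits 13:8.
+ */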
+
+static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
+{
+	struct mdc_dma *mdma = mchan->mdma;
+
+	regmap_update_bits(mdma->periph_regs,
+			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
+			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
+			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
+			   mchan->periph <<
+			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
+}
+
+static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
+{
+	struct mdc_dma *mdma = mchan->mdma;
+
+	regmap_update_bits(mdma->periph_regs,
+			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
+			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
+			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
+			   0);
+}
+
+static const struct mdc_dma_soc_data pistachio_mdc_data = {
+	.enable_chan = pistachio_mdc_enable_chan,
+	.disable_chan = pistachio_mdc_disable_chan,
+};
+
+static const struct of_device_id mdc_dma_of_match[] = {
+	{ .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, mdc_dma_of_match);
+
+static int mdc_dma_probe(struct platform_device *pdev)
+{
+	struct mdc_dma *mdma;
+	struct resource *res;
+	const struct of_device_id *match;
+	unsigned int i;
+	u32 val;
+	int ret;
+
+	mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
+	if (!mdma)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, mdma);
+
+	match = of_match_device(mdc_dma_of_match, &pdev->dev);
+	mdma->soc = match->data;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mdma->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mdma->regs))
+		return PTR_ERR(mdma->regs);
+
+	mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							    "img,cr-periph");
+	if (IS_ERR(mdma->periph_regs))
+		return PTR_ERR(mdma->periph_regs);
+
+	mdma->clk = devm_clk_get(&pdev->dev, "sys");
+	if (IS_ERR(mdma->clk))
+		return PTR_ERR(mdma->clk);
+
+	ret = clk_prepare_enable(mdma->clk);
+	if (ret)
+		return ret;
+
+	dma_cap_zero(mdma->dma_dev.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
+	dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
+	dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);
+
+	val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
+	mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
+		MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
+	mdma->nr_threads =
+		1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
+		      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
+	mdma->bus_width =
+		(1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
+		       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
+	/*
+	 * The hardware supports transfers of up to MDC_TRANSFER_SIZE_MASK + 1
+	 * bytes, but using that full range would make the value reported in
+	 * MDC_ACTIVE_TRANSFER_SIZE ambiguous - an active transfer size
+	 * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
+	 * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining.  To eliminate this
+	 * ambiguity, restrict transfer sizes to one bus-width less than the
+	 * actual maximum.
+	 */
+	mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;
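+	/*
+	 * Worked example (assuming a 24-bit transfer size field and a 4-byte
+	 * bus): max_xfer_size = 0x1000000 - 4 = 0xfffffc bytes per list node.
+	 */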
+
+	of_property_read_u32(pdev->dev.of_node, "dma-channels",
+			     &mdma->nr_channels);
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "img,max-burst-multiplier",
+				   &mdma->max_burst_mult);
+	if (ret)
+		goto disable_clk;
+
+	mdma->dma_dev.dev = &pdev->dev;
+	mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
+	mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
+	mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
+	mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
+	mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
+	mdma->dma_dev.device_tx_status = mdc_tx_status;
+	mdma->dma_dev.device_issue_pending = mdc_issue_pending;
+	mdma->dma_dev.device_terminate_all = mdc_terminate_all;
+	mdma->dma_dev.device_config = mdc_slave_config;
+
+	mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
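+	/*
+	 * Advertise every power-of-two slave width up to the bus width, e.g.
+	 * a 4-byte bus reports BIT(1) | BIT(2) | BIT(4).
+	 */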
+	for (i = 1; i <= mdma->bus_width; i <<= 1) {
+		mdma->dma_dev.src_addr_widths |= BIT(i);
+		mdma->dma_dev.dst_addr_widths |= BIT(i);
+	}
+
+	INIT_LIST_HEAD(&mdma->dma_dev.channels);
+	for (i = 0; i < mdma->nr_channels; i++) {
+		struct mdc_chan *mchan = &mdma->channels[i];
+
+		mchan->mdma = mdma;
+		mchan->chan_nr = i;
+		mchan->irq = platform_get_irq(pdev, i);
+		if (mchan->irq < 0) {
+			ret = mchan->irq;
+			goto disable_clk;
+		}
+		ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
+				       IRQ_TYPE_LEVEL_HIGH,
+				       dev_name(&pdev->dev), mchan);
+		if (ret < 0)
+			goto disable_clk;
+
+		mchan->vc.desc_free = mdc_desc_free;
+		vchan_init(&mchan->vc, &mdma->dma_dev);
+	}
+
+	mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
+					   sizeof(struct mdc_hw_list_desc),
+					   4, 0);
+	if (!mdma->desc_pool) {
+		ret = -ENOMEM;
+		goto disable_clk;
+	}
+
+	ret = dma_async_device_register(&mdma->dma_dev);
+	if (ret)
+		goto disable_clk;
+
+	ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
+	if (ret)
+		goto unregister;
+
+	dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
+		 mdma->nr_channels, mdma->nr_threads);
+
+	return 0;
+
+unregister:
+	dma_async_device_unregister(&mdma->dma_dev);
+disable_clk:
+	clk_disable_unprepare(mdma->clk);
+	return ret;
+}
+
+static int mdc_dma_remove(struct platform_device *pdev)
+{
+	struct mdc_dma *mdma = platform_get_drvdata(pdev);
+	struct mdc_chan *mchan, *next;
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&mdma->dma_dev);
+
+	list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
+				 vc.chan.device_node) {
+		list_del(&mchan->vc.chan.device_node);
+
+		synchronize_irq(mchan->irq);
+		devm_free_irq(&pdev->dev, mchan->irq, mchan);
+
+		tasklet_kill(&mchan->vc.task);
+	}
+
+	clk_disable_unprepare(mdma->clk);
+
+	return 0;
+}
+
+static struct platform_driver mdc_dma_driver = {
+	.driver = {
+		.name = "img-mdc-dma",
+		.of_match_table = of_match_ptr(mdc_dma_of_match),
+	},
+	.probe = mdc_dma_probe,
+	.remove = mdc_dma_remove,
+};
+module_platform_driver(mdc_dma_driver);
+
+MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_LICENSE("GPL v2");

+ 51 - 57
drivers/dma/imx-dma.c

@@ -230,11 +230,6 @@ static inline int is_imx1_dma(struct imxdma_engine *imxdma)
 	return imxdma->devtype == IMX1_DMA;
 }
 
 
-static inline int is_imx21_dma(struct imxdma_engine *imxdma)
-{
-	return imxdma->devtype == IMX21_DMA;
-}
-
 static inline int is_imx27_dma(struct imxdma_engine *imxdma)
 {
 	return imxdma->devtype == IMX27_DMA;
@@ -669,69 +664,67 @@ out:
 
 
 }
 
 
-static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int imxdma_terminate_all(struct dma_chan *chan)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
-	struct dma_slave_config *dmaengine_cfg = (void *)arg;
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	unsigned long flags;
-	unsigned int mode = 0;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		imxdma_disable_hw(imxdmac);
-
-		spin_lock_irqsave(&imxdma->lock, flags);
-		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
-		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
-		spin_unlock_irqrestore(&imxdma->lock, flags);
-		return 0;
-	case DMA_SLAVE_CONFIG:
-		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-			imxdmac->per_address = dmaengine_cfg->src_addr;
-			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
-			imxdmac->word_size = dmaengine_cfg->src_addr_width;
-		} else {
-			imxdmac->per_address = dmaengine_cfg->dst_addr;
-			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
-			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
-		}
 
 
-		switch (imxdmac->word_size) {
-		case DMA_SLAVE_BUSWIDTH_1_BYTE:
-			mode = IMX_DMA_MEMSIZE_8;
-			break;
-		case DMA_SLAVE_BUSWIDTH_2_BYTES:
-			mode = IMX_DMA_MEMSIZE_16;
-			break;
-		default:
-		case DMA_SLAVE_BUSWIDTH_4_BYTES:
-			mode = IMX_DMA_MEMSIZE_32;
-			break;
-		}
+	imxdma_disable_hw(imxdmac);
 
 
-		imxdmac->hw_chaining = 0;
+	spin_lock_irqsave(&imxdma->lock, flags);
+	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
+	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
+	return 0;
+}
 
 
-		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
-			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
-			CCR_REN;
-		imxdmac->ccr_to_device =
-			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
-			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
-		imx_dmav1_writel(imxdma, imxdmac->dma_request,
-				 DMA_RSSR(imxdmac->channel));
+static int imxdma_config(struct dma_chan *chan,
+			 struct dma_slave_config *dmaengine_cfg)
+{
+	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+	struct imxdma_engine *imxdma = imxdmac->imxdma;
+	unsigned int mode = 0;
 
 
-		/* Set burst length */
-		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
-				imxdmac->word_size, DMA_BLR(imxdmac->channel));
+	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+		imxdmac->per_address = dmaengine_cfg->src_addr;
+		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
+		imxdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else {
+		imxdmac->per_address = dmaengine_cfg->dst_addr;
+		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
+	}
 
 
-		return 0;
+	switch (imxdmac->word_size) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		mode = IMX_DMA_MEMSIZE_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		mode = IMX_DMA_MEMSIZE_16;
+		break;
 	default:
-		return -ENOSYS;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		mode = IMX_DMA_MEMSIZE_32;
+		break;
 	}
 
 
-	return -EINVAL;
+	imxdmac->hw_chaining = 0;
+
+	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
+		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
+		CCR_REN;
+	imxdmac->ccr_to_device =
+		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
+		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
+	imx_dmav1_writel(imxdma, imxdmac->dma_request,
+			 DMA_RSSR(imxdmac->channel));
+
+	/* Set burst length */
+	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
+			 imxdmac->word_size, DMA_BLR(imxdmac->channel));
+
+	return 0;
 }
 
 
 static enum dma_status imxdma_tx_status(struct dma_chan *chan,
@@ -1184,7 +1177,8 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
 	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
 	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
-	imxdma->dma_device.device_control = imxdma_control;
+	imxdma->dma_device.device_config = imxdma_config;
+	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
 	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
 
 
 	platform_set_drvdata(pdev, imxdma);

+ 59 - 91
drivers/dma/imx-sdma.c

@@ -830,20 +830,29 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	return ret;
 }
 
 
-static void sdma_disable_channel(struct sdma_channel *sdmac)
+static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sdma_channel, chan);
+}
+
+static int sdma_disable_channel(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
 
 
 	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 	sdmac->status = DMA_ERROR;
+
+	return 0;
 }
 
 
-static int sdma_config_channel(struct sdma_channel *sdmac)
+static int sdma_config_channel(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	int ret;
 
 
-	sdma_disable_channel(sdmac);
+	sdma_disable_channel(chan);
 
 
 	sdmac->event_mask[0] = 0;
 	sdmac->event_mask[1] = 0;
@@ -935,11 +944,6 @@ out:
 	return ret;
 }
 
 
-static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
-{
-	return container_of(chan, struct sdma_channel, chan);
-}
-
 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	unsigned long flags;
@@ -1004,7 +1008,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 
 
-	sdma_disable_channel(sdmac);
+	sdma_disable_channel(chan);
 
 
 	if (sdmac->event_id0)
 		sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1203,35 +1207,24 @@ err_out:
 	return NULL;
 }
 
 
-static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int sdma_config(struct dma_chan *chan,
+		       struct dma_slave_config *dmaengine_cfg)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct dma_slave_config *dmaengine_cfg = (void *)arg;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		sdma_disable_channel(sdmac);
-		return 0;
-	case DMA_SLAVE_CONFIG:
-		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-			sdmac->per_address = dmaengine_cfg->src_addr;
-			sdmac->watermark_level = dmaengine_cfg->src_maxburst *
-						dmaengine_cfg->src_addr_width;
-			sdmac->word_size = dmaengine_cfg->src_addr_width;
-		} else {
-			sdmac->per_address = dmaengine_cfg->dst_addr;
-			sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
-						dmaengine_cfg->dst_addr_width;
-			sdmac->word_size = dmaengine_cfg->dst_addr_width;
-		}
-		sdmac->direction = dmaengine_cfg->direction;
-		return sdma_config_channel(sdmac);
-	default:
-		return -ENOSYS;
-	}
 
 
-	return -EINVAL;
+	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+		sdmac->per_address = dmaengine_cfg->src_addr;
+		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+			dmaengine_cfg->src_addr_width;
+		sdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else {
+		sdmac->per_address = dmaengine_cfg->dst_addr;
+		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
+			dmaengine_cfg->dst_addr_width;
+		sdmac->word_size = dmaengine_cfg->dst_addr_width;
+	}
+	sdmac->direction = dmaengine_cfg->direction;
+	return sdma_config_channel(chan);
 }
 
 
 static enum dma_status sdma_tx_status(struct dma_chan *chan,
@@ -1303,15 +1296,15 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
 	if (header->ram_code_start + header->ram_code_size > fw->size)
 		goto err_firmware;
 	switch (header->version_major) {
-		case 1:
-			sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
-			break;
-		case 2:
-			sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
-			break;
-		default:
-			dev_err(sdma->dev, "unknown firmware version\n");
-			goto err_firmware;
+	case 1:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+		break;
+	case 2:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+		break;
+	default:
+		dev_err(sdma->dev, "unknown firmware version\n");
+		goto err_firmware;
 	}
 
 
 	addr = (void *)header + header->script_addrs_start;
@@ -1479,7 +1472,7 @@ static int sdma_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
 
-	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
+	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
 	if (!sdma)
 		return -ENOMEM;
 
 
@@ -1488,48 +1481,34 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dev = &pdev->dev;
 	sdma->drvdata = drvdata;
 
 
-	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (!iores || irq < 0) {
-		ret = -EINVAL;
-		goto err_irq;
-	}
+	if (irq < 0)
+		return irq;
 
 
-	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
-		ret = -EBUSY;
-		goto err_request_region;
-	}
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(sdma->regs))
+		return PTR_ERR(sdma->regs);
 
 
 	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
-	if (IS_ERR(sdma->clk_ipg)) {
-		ret = PTR_ERR(sdma->clk_ipg);
-		goto err_clk;
-	}
+	if (IS_ERR(sdma->clk_ipg))
+		return PTR_ERR(sdma->clk_ipg);
 
 
 	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
-	if (IS_ERR(sdma->clk_ahb)) {
-		ret = PTR_ERR(sdma->clk_ahb);
-		goto err_clk;
-	}
+	if (IS_ERR(sdma->clk_ahb))
+		return PTR_ERR(sdma->clk_ahb);
 
 
 	clk_prepare(sdma->clk_ipg);
 	clk_prepare(sdma->clk_ahb);
 
 
-	sdma->regs = ioremap(iores->start, resource_size(iores));
-	if (!sdma->regs) {
-		ret = -ENOMEM;
-		goto err_ioremap;
-	}
-
-	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
+	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
+			       sdma);
 	if (ret)
-		goto err_request_irq;
+		return ret;
 
 
 	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
-	if (!sdma->script_addrs) {
-		ret = -ENOMEM;
-		goto err_alloc;
-	}
+	if (!sdma->script_addrs)
+		return -ENOMEM;
 
 
 	/* initially no scripts available */
 	saddr_arr = (s32 *)sdma->script_addrs;
@@ -1600,7 +1579,12 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_tx_status = sdma_tx_status;
 	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
-	sdma->dma_device.device_control = sdma_control;
+	sdma->dma_device.device_config = sdma_config;
+	sdma->dma_device.device_terminate_all = sdma_disable_channel;
+	sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
 	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
 	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
@@ -1629,38 +1613,22 @@ err_register:
 	dma_async_device_unregister(&sdma->dma_device);
 err_init:
 	kfree(sdma->script_addrs);
-err_alloc:
-	free_irq(irq, sdma);
-err_request_irq:
-	iounmap(sdma->regs);
-err_ioremap:
-err_clk:
-	release_mem_region(iores->start, resource_size(iores));
-err_request_region:
-err_irq:
-	kfree(sdma);
 	return ret;
 }
 
 
 static int sdma_remove(struct platform_device *pdev)
 {
 	struct sdma_engine *sdma = platform_get_drvdata(pdev);
-	struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	int irq = platform_get_irq(pdev, 0);
 	int i;
 
 
 	dma_async_device_unregister(&sdma->dma_device);
 	kfree(sdma->script_addrs);
-	free_irq(irq, sdma);
-	iounmap(sdma->regs);
-	release_mem_region(iores->start, resource_size(iores));
 	/* Kill the tasklet */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct sdma_channel *sdmac = &sdma->channel[i];
 
 
 		tasklet_kill(&sdmac->tasklet);
 	}
-	kfree(sdma);
 
 
 	platform_set_drvdata(pdev, NULL);
 	dev_info(&pdev->dev, "Removed...\n");

+ 6 - 19
drivers/dma/intel_mid_dma.c

@@ -492,10 +492,10 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 	return ret;
 }
 
 
-static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
+static int intel_mid_dma_config(struct dma_chan *chan,
+				struct dma_slave_config *slave)
 {
 	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
-	struct dma_slave_config  *slave = (struct dma_slave_config *)arg;
 	struct intel_mid_dma_slave *mid_slave;
 
 
 	BUG_ON(!midc);
@@ -509,28 +509,14 @@ static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
 	midc->mid_slave = mid_slave;
 	return 0;
 }
-/**
- * intel_mid_dma_device_control -	DMA device control
- * @chan: chan for DMA control
- * @cmd: control cmd
- * @arg: cmd arg value
- *
- * Perform DMA control command
- */
-static int intel_mid_dma_device_control(struct dma_chan *chan,
-			enum dma_ctrl_cmd cmd, unsigned long arg)
+
+static int intel_mid_dma_terminate_all(struct dma_chan *chan)
 {
 	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
 	struct middma_device	*mid = to_middma_device(chan->device);
 	struct intel_mid_dma_desc	*desc, *_desc;
 	union intel_mid_dma_cfg_lo cfg_lo;
 
 
-	if (cmd == DMA_SLAVE_CONFIG)
-		return dma_slave_control(chan, arg);
-
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
 	spin_lock_bh(&midc->lock);
 	if (midc->busy == false) {
 		spin_unlock_bh(&midc->lock);
@@ -1148,7 +1134,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
 	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
 	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
 	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
-	dma->common.device_control = intel_mid_dma_device_control;
+	dma->common.device_config = intel_mid_dma_config;
+	dma->common.device_terminate_all = intel_mid_dma_terminate_all;
 
 
 	/*enable dma cntrl*/
 	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

+ 22 - 3
drivers/dma/ioat/dma_v3.c

@@ -214,6 +214,11 @@ static bool is_bwd_ioat(struct pci_dev *pdev)
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+	/* even though not Atom, BDX-DE has same DMA silicon */
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
 		return true;
 	default:
 		return false;
@@ -489,6 +494,7 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
 	struct ioat_chan_common *chan = &ioat->base;
 	struct pci_dev *pdev = to_pdev(chan);
 	struct ioat_dma_descriptor *hw;
+	struct dma_async_tx_descriptor *tx;
 	u64 phys_complete;
 	struct ioat_ring_ent *desc;
 	u32 err_handled = 0;
@@ -534,6 +540,16 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
 		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
 			__func__, chanerr, err_handled);
 		BUG();
+	} else { /* cleanup the faulty descriptor */
+		tx = &desc->txd;
+		if (tx->cookie) {
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+		}
 	}
 
 
 	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -1300,7 +1316,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
 
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
 		dev_err(dev, "Self-test xor timed out\n");
 		err = -ENODEV;
 		goto dma_unmap;
@@ -1366,7 +1383,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
 
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
 		dev_err(dev, "Self-test validate timed out\n");
 		err = -ENODEV;
 		goto dma_unmap;
@@ -1418,7 +1436,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
 
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
 		dev_err(dev, "Self-test 2nd validate timed out\n");
 		err = -ENODEV;
 		goto dma_unmap;

+ 5 - 0
drivers/dma/ioat/hw.h

@@ -57,6 +57,11 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_BWD2	0x0C52
 #define PCI_DEVICE_ID_INTEL_IOAT_BWD3	0x0C53
 
 
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE0	0x6f50
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE1	0x6f51
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2	0x6f52
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3	0x6f53
+
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */

+ 5 - 0
drivers/dma/ioat/pci.c

@@ -111,6 +111,11 @@ static struct pci_device_id ioat_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
 
 
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
+
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);

+ 51 - 45
drivers/dma/ipu/ipu_idmac.c

@@ -1398,76 +1398,81 @@ static void idmac_issue_pending(struct dma_chan *chan)
 	 */
 }
 
 
-static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			   unsigned long arg)
+static int idmac_pause(struct dma_chan *chan)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
 	struct idmac *idmac = to_idmac(chan->device);
 	struct ipu *ipu = to_ipu(idmac);
 	struct list_head *list, *tmp;
 	unsigned long flags;
-	int i;
 
 
-	switch (cmd) {
-	case DMA_PAUSE:
-		spin_lock_irqsave(&ipu->lock, flags);
-		ipu_ic_disable_task(ipu, chan->chan_id);
+	mutex_lock(&ichan->chan_mutex);
 
 
-		/* Return all descriptors into "prepared" state */
-		list_for_each_safe(list, tmp, &ichan->queue)
-			list_del_init(list);
+	spin_lock_irqsave(&ipu->lock, flags);
+	ipu_ic_disable_task(ipu, chan->chan_id);
 
 
-		ichan->sg[0] = NULL;
-		ichan->sg[1] = NULL;
+	/* Return all descriptors into "prepared" state */
+	list_for_each_safe(list, tmp, &ichan->queue)
+		list_del_init(list);
 
 
-		spin_unlock_irqrestore(&ipu->lock, flags);
+	ichan->sg[0] = NULL;
+	ichan->sg[1] = NULL;
 
 
-		ichan->status = IPU_CHANNEL_INITIALIZED;
-		break;
-	case DMA_TERMINATE_ALL:
-		ipu_disable_channel(idmac, ichan,
-				    ichan->status >= IPU_CHANNEL_ENABLED);
+	spin_unlock_irqrestore(&ipu->lock, flags);
 
 
-		tasklet_disable(&ipu->tasklet);
+	ichan->status = IPU_CHANNEL_INITIALIZED;
 
 
-		/* ichan->queue is modified in ISR, have to spinlock */
-		spin_lock_irqsave(&ichan->lock, flags);
-		list_splice_init(&ichan->queue, &ichan->free_list);
+	mutex_unlock(&ichan->chan_mutex);
 
 
-		if (ichan->desc)
-			for (i = 0; i < ichan->n_tx_desc; i++) {
-				struct idmac_tx_desc *desc = ichan->desc + i;
-				if (list_empty(&desc->list))
-					/* Descriptor was prepared, but not submitted */
-					list_add(&desc->list, &ichan->free_list);
+	return 0;
+}
 
 
-				async_tx_clear_ack(&desc->txd);
-			}
+static int __idmac_terminate_all(struct dma_chan *chan)
+{
+	struct idmac_channel *ichan = to_idmac_chan(chan);
+	struct idmac *idmac = to_idmac(chan->device);
+	struct ipu *ipu = to_ipu(idmac);
+	unsigned long flags;
+	int i;
 
 
-		ichan->sg[0] = NULL;
-		ichan->sg[1] = NULL;
-		spin_unlock_irqrestore(&ichan->lock, flags);
+	ipu_disable_channel(idmac, ichan,
+			    ichan->status >= IPU_CHANNEL_ENABLED);
 
 
-		tasklet_enable(&ipu->tasklet);
+	tasklet_disable(&ipu->tasklet);
 
 
-		ichan->status = IPU_CHANNEL_INITIALIZED;
-		break;
-	default:
-		return -ENOSYS;
-	}
+	/* ichan->queue is modified in ISR, have to spinlock */
+	spin_lock_irqsave(&ichan->lock, flags);
+	list_splice_init(&ichan->queue, &ichan->free_list);
+
+	if (ichan->desc)
+		for (i = 0; i < ichan->n_tx_desc; i++) {
+			struct idmac_tx_desc *desc = ichan->desc + i;
+			if (list_empty(&desc->list))
+				/* Descriptor was prepared, but not submitted */
+				list_add(&desc->list, &ichan->free_list);
+
+			async_tx_clear_ack(&desc->txd);
+		}
+
+	ichan->sg[0] = NULL;
+	ichan->sg[1] = NULL;
+	spin_unlock_irqrestore(&ichan->lock, flags);
+
+	tasklet_enable(&ipu->tasklet);
+
+	ichan->status = IPU_CHANNEL_INITIALIZED;
 
 
 	return 0;
 }
 
 
-static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			 unsigned long arg)
+static int idmac_terminate_all(struct dma_chan *chan)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
 	int ret;
 
 
 	mutex_lock(&ichan->chan_mutex);
 
 
-	ret = __idmac_control(chan, cmd, arg);
+	ret = __idmac_terminate_all(chan);
 
 
 	mutex_unlock(&ichan->chan_mutex);
 
 
@@ -1568,7 +1573,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 
 
 	mutex_lock(&ichan->chan_mutex);
 
 
-	__idmac_control(chan, DMA_TERMINATE_ALL, 0);
+	__idmac_terminate_all(chan);
 
 
 	if (ichan->status > IPU_CHANNEL_FREE) {
 #ifdef DEBUG
@@ -1622,7 +1627,8 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 
 
 	/* Compulsory for DMA_SLAVE fields */
 	dma->device_prep_slave_sg		= idmac_prep_slave_sg;
-	dma->device_control			= idmac_control;
+	dma->device_pause			= idmac_pause;
+	dma->device_terminate_all		= idmac_terminate_all;
 
 
 	INIT_LIST_HEAD(&dma->channels);
 	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1655,7 +1661,7 @@ static void ipu_idmac_exit(struct ipu *ipu)
 	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
 		struct idmac_channel *ichan = ipu->channel + i;
 
 
-		idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
+		idmac_terminate_all(&ichan->dma_chan);
 	}
 
 
 	dma_async_device_unregister(&idmac->dma);

+ 110 - 93
drivers/dma/k3dma.c

@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
 	num = 0;
 
 
 	if (!c->ccfg) {
-		/* default is memtomem, without calling device_control */
+		/* default is memtomem, without calling device_config */
 		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
 		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
 		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
@@ -523,112 +523,126 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
 	return vchan_tx_prep(&c->vc, &ds->vd, flags);
 }
 
 
-static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
+static int k3_dma_config(struct dma_chan *chan,
+			 struct dma_slave_config *cfg)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	u32 maxburst = 0, val = 0;
+	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+
+	if (cfg == NULL)
+		return -EINVAL;
+	c->dir = cfg->direction;
+	if (c->dir == DMA_DEV_TO_MEM) {
+		c->ccfg = CX_CFG_DSTINCR;
+		c->dev_addr = cfg->src_addr;
+		maxburst = cfg->src_maxburst;
+		width = cfg->src_addr_width;
+	} else if (c->dir == DMA_MEM_TO_DEV) {
+		c->ccfg = CX_CFG_SRCINCR;
+		c->dev_addr = cfg->dst_addr;
+		maxburst = cfg->dst_maxburst;
+		width = cfg->dst_addr_width;
+	}
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		val =  __ffs(width);
+		break;
+	default:
+		val = 3;
+		break;
+	}
+	c->ccfg |= (val << 12) | (val << 16);
+
+	if ((maxburst == 0) || (maxburst > 16))
+		val = 16;
+	else
+		val = maxburst - 1;
+	c->ccfg |= (val << 20) | (val << 24);
+	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+	/* specific request line */
+	c->ccfg |= c->vc.chan.chan_id << 4;
+
+	return 0;
+}
+
+static int k3_dma_terminate_all(struct dma_chan *chan)
 {
 	struct k3_dma_chan *c = to_k3_chan(chan);
 	struct k3_dma_dev *d = to_k3_dma(chan->device);
-	struct dma_slave_config *cfg = (void *)arg;
 	struct k3_dma_phy *p = c->phy;
 	unsigned long flags;
-	u32 maxburst = 0, val = 0;
-	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 	LIST_HEAD(head);
 
 
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		if (cfg == NULL)
-			return -EINVAL;
-		c->dir = cfg->direction;
-		if (c->dir == DMA_DEV_TO_MEM) {
-			c->ccfg = CX_CFG_DSTINCR;
-			c->dev_addr = cfg->src_addr;
-			maxburst = cfg->src_maxburst;
-			width = cfg->src_addr_width;
-		} else if (c->dir == DMA_MEM_TO_DEV) {
-			c->ccfg = CX_CFG_SRCINCR;
-			c->dev_addr = cfg->dst_addr;
-			maxburst = cfg->dst_maxburst;
-			width = cfg->dst_addr_width;
-		}
-		switch (width) {
-		case DMA_SLAVE_BUSWIDTH_1_BYTE:
-		case DMA_SLAVE_BUSWIDTH_2_BYTES:
-		case DMA_SLAVE_BUSWIDTH_4_BYTES:
-		case DMA_SLAVE_BUSWIDTH_8_BYTES:
-			val =  __ffs(width);
-			break;
-		default:
-			val = 3;
-			break;
-		}
-		c->ccfg |= (val << 12) | (val << 16);
+	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
 
 
-		if ((maxburst == 0) || (maxburst > 16))
-			val = 16;
-		else
-			val = maxburst - 1;
-		c->ccfg |= (val << 20) | (val << 24);
-		c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
 
 
-		/* specific request line */
-		c->ccfg |= c->vc.chan.chan_id << 4;
-		break;
+	/* Clear the tx descriptor lists */
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_get_all_descriptors(&c->vc, &head);
+	if (p) {
+		/* vchan is assigned to a pchan - stop the channel */
+		k3_dma_terminate_chan(p, d);
+		c->phy = NULL;
+		p->vchan = NULL;
+		p->ds_run = p->ds_done = NULL;
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
 
 
-	case DMA_TERMINATE_ALL:
-		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+	return 0;
+}
 
 
-		/* Prevent this channel being scheduled */
-		spin_lock(&d->lock);
-		list_del_init(&c->node);
-		spin_unlock(&d->lock);
+static int k3_dma_transfer_pause(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct k3_dma_phy *p = c->phy;
 
 
-		/* Clear the tx descriptor lists */
-		spin_lock_irqsave(&c->vc.lock, flags);
-		vchan_get_all_descriptors(&c->vc, &head);
+	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+	if (c->status == DMA_IN_PROGRESS) {
+		c->status = DMA_PAUSED;
 		if (p) {
-			/* vchan is assigned to a pchan - stop the channel */
-			k3_dma_terminate_chan(p, d);
-			c->phy = NULL;
-			p->vchan = NULL;
-			p->ds_run = p->ds_done = NULL;
+			k3_dma_pause_dma(p, false);
+		} else {
+			spin_lock(&d->lock);
+			list_del_init(&c->node);
+			spin_unlock(&d->lock);
 		}
-		spin_unlock_irqrestore(&c->vc.lock, flags);
-		vchan_dma_desc_free_list(&c->vc, &head);
-		break;
+	}
 
 
-	case DMA_PAUSE:
-		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
-		if (c->status == DMA_IN_PROGRESS) {
-			c->status = DMA_PAUSED;
-			if (p) {
-				k3_dma_pause_dma(p, false);
-			} else {
-				spin_lock(&d->lock);
-				list_del_init(&c->node);
-				spin_unlock(&d->lock);
-			}
-		}
-		break;
+	return 0;
+}
 
 
-	case DMA_RESUME:
-		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
-		spin_lock_irqsave(&c->vc.lock, flags);
-		if (c->status == DMA_PAUSED) {
-			c->status = DMA_IN_PROGRESS;
-			if (p) {
-				k3_dma_pause_dma(p, true);
-			} else if (!list_empty(&c->vc.desc_issued)) {
-				spin_lock(&d->lock);
-				list_add_tail(&c->node, &d->chan_pending);
-				spin_unlock(&d->lock);
-			}
+static int k3_dma_transfer_resume(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct k3_dma_phy *p = c->phy;
+	unsigned long flags;
+
+	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (c->status == DMA_PAUSED) {
+		c->status = DMA_IN_PROGRESS;
+		if (p) {
+			k3_dma_pause_dma(p, true);
+		} else if (!list_empty(&c->vc.desc_issued)) {
+			spin_lock(&d->lock);
+			list_add_tail(&c->node, &d->chan_pending);
+			spin_unlock(&d->lock);
 		}
-		spin_unlock_irqrestore(&c->vc.lock, flags);
-		break;
-	default:
-		return -ENXIO;
 	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
 	return 0;
 }
 
 
@@ -720,7 +734,10 @@ static int k3_dma_probe(struct platform_device *op)
 	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
 	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
 	d->slave.device_issue_pending = k3_dma_issue_pending;
-	d->slave.device_control = k3_dma_control;
+	d->slave.device_config = k3_dma_config;
+	d->slave.device_pause = k3_dma_transfer_pause;
+	d->slave.device_resume = k3_dma_transfer_resume;
+	d->slave.device_terminate_all = k3_dma_terminate_all;
 	d->slave.copy_align = DMA_ALIGN;
 
 
 	/* init virtual channel */
@@ -787,7 +804,7 @@ static int k3_dma_remove(struct platform_device *op)
 }
 
 
 #ifdef CONFIG_PM_SLEEP
-static int k3_dma_suspend(struct device *dev)
+static int k3_dma_suspend_dev(struct device *dev)
 {
 	struct k3_dma_dev *d = dev_get_drvdata(dev);
 	u32 stat = 0;
@@ -803,7 +820,7 @@ static int k3_dma_suspend(struct device *dev)
 	return 0;
 }
 
 
-static int k3_dma_resume(struct device *dev)
+static int k3_dma_resume_dev(struct device *dev)
 {
 	struct k3_dma_dev *d = dev_get_drvdata(dev);
 	int ret = 0;
@@ -818,7 +835,7 @@ static int k3_dma_resume(struct device *dev)
 }
 #endif
 
 
-static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
 
 
 static struct platform_driver k3_pdma_driver = {
 	.driver		= {

+ 56 - 53
drivers/dma/mmp_pdma.c

@@ -683,68 +683,70 @@ fail:
 	return NULL;
 }
 
 
-static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-			    unsigned long arg)
+static int mmp_pdma_config(struct dma_chan *dchan,
+			   struct dma_slave_config *cfg)
 {
 	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
-	struct dma_slave_config *cfg = (void *)arg;
-	unsigned long flags;
 	u32 maxburst = 0, addr = 0;
 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 
 
 	if (!dchan)
 		return -EINVAL;
 
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		disable_chan(chan->phy);
-		mmp_pdma_free_phy(chan);
-		spin_lock_irqsave(&chan->desc_lock, flags);
-		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
-		mmp_pdma_free_desc_list(chan, &chan->chain_running);
-		spin_unlock_irqrestore(&chan->desc_lock, flags);
-		chan->idle = true;
-		break;
-	case DMA_SLAVE_CONFIG:
-		if (cfg->direction == DMA_DEV_TO_MEM) {
-			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
-			maxburst = cfg->src_maxburst;
-			width = cfg->src_addr_width;
-			addr = cfg->src_addr;
-		} else if (cfg->direction == DMA_MEM_TO_DEV) {
-			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
-			maxburst = cfg->dst_maxburst;
-			width = cfg->dst_addr_width;
-			addr = cfg->dst_addr;
-		}
-
-		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
-			chan->dcmd |= DCMD_WIDTH1;
-		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
-			chan->dcmd |= DCMD_WIDTH2;
-		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
-			chan->dcmd |= DCMD_WIDTH4;
-
-		if (maxburst == 8)
-			chan->dcmd |= DCMD_BURST8;
-		else if (maxburst == 16)
-			chan->dcmd |= DCMD_BURST16;
-		else if (maxburst == 32)
-			chan->dcmd |= DCMD_BURST32;
-
-		chan->dir = cfg->direction;
-		chan->dev_addr = addr;
-		/* FIXME: drivers should be ported over to use the filter
-		 * function. Once that's done, the following two lines can
-		 * be removed.
-		 */
-		if (cfg->slave_id)
-			chan->drcmr = cfg->slave_id;
-		break;
-	default:
-		return -ENOSYS;
+	if (cfg->direction == DMA_DEV_TO_MEM) {
+		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
+		maxburst = cfg->src_maxburst;
+		width = cfg->src_addr_width;
+		addr = cfg->src_addr;
+	} else if (cfg->direction == DMA_MEM_TO_DEV) {
+		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
+		maxburst = cfg->dst_maxburst;
+		width = cfg->dst_addr_width;
+		addr = cfg->dst_addr;
 	}

+	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+		chan->dcmd |= DCMD_WIDTH1;
+	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+		chan->dcmd |= DCMD_WIDTH2;
+	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
+		chan->dcmd |= DCMD_WIDTH4;
+
+	if (maxburst == 8)
+		chan->dcmd |= DCMD_BURST8;
+	else if (maxburst == 16)
+		chan->dcmd |= DCMD_BURST16;
+	else if (maxburst == 32)
+		chan->dcmd |= DCMD_BURST32;
+
+	chan->dir = cfg->direction;
+	chan->dev_addr = addr;
+	/* FIXME: drivers should be ported over to use the filter
+	 * function. Once that's done, the following two lines can
+	 * be removed.
+	 */
+	if (cfg->slave_id)
+		chan->drcmr = cfg->slave_id;
+
+	return 0;
+}
+
+static int mmp_pdma_terminate_all(struct dma_chan *dchan)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+	unsigned long flags;
+
+	if (!dchan)
+		return -EINVAL;
+
+	disable_chan(chan->phy);
+	mmp_pdma_free_phy(chan);
+	spin_lock_irqsave(&chan->desc_lock, flags);
+	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
+	mmp_pdma_free_desc_list(chan, &chan->chain_running);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+	chan->idle = true;
+
 	return 0;
 	return 0;
 }
 
@@ -1061,7 +1063,8 @@ static int mmp_pdma_probe(struct platform_device *op)
 	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
 	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
 	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
-	pdev->device.device_control = mmp_pdma_control;
+	pdev->device.device_config = mmp_pdma_config;
+	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
 	pdev->device.copy_align = PDMA_ALIGNMENT;

 	if (pdev->dev->coherent_dma_mask)
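
For reference, a hedged sketch of the slave configuration a client might hand to the new mmp_pdma_config() above, together with the DCMD bits that mapping would select; the device address is hypothetical:

#include <linux/dmaengine.h>

/* A MEM_TO_DEV config; per mmp_pdma_config() above this selects
 * DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_WIDTH4 | DCMD_BURST32.
 */
static const struct dma_slave_config example_tx_cfg = {
	.direction	= DMA_MEM_TO_DEV,
	.dst_addr	= 0xd4030000,	/* hypothetical peripheral FIFO */
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_maxburst	= 32,
};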

+ 46 - 39
drivers/dma/mmp_tdma.c

@@ -19,7 +19,6 @@
 #include <linux/dmaengine.h>
 #include <linux/platform_device.h>
 #include <linux/device.h>
-#include <mach/regs-icu.h>
 #include <linux/platform_data/dma-mmp_tdma.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
@@ -164,33 +163,46 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
 	tdmac->status = DMA_IN_PROGRESS;
 }

-static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
+static int mmp_tdma_disable_chan(struct dma_chan *chan)
 {
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
 	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
 					tdmac->reg_base + TDCR);

 	tdmac->status = DMA_COMPLETE;
+
+	return 0;
 }

-static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
+static int mmp_tdma_resume_chan(struct dma_chan *chan)
 {
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
 	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
 					tdmac->reg_base + TDCR);
 	tdmac->status = DMA_IN_PROGRESS;
+
+	return 0;
 }

-static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
+static int mmp_tdma_pause_chan(struct dma_chan *chan)
 {
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
 	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
 					tdmac->reg_base + TDCR);
 	tdmac->status = DMA_PAUSED;
+
+	return 0;
 }

-static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
+static int mmp_tdma_config_chan(struct dma_chan *chan)
 {
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
 	unsigned int tdcr = 0;

-	mmp_tdma_disable_chan(tdmac);
+	mmp_tdma_disable_chan(chan);

 	if (tdmac->dir == DMA_MEM_TO_DEV)
 		tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
@@ -452,42 +464,34 @@ err_out:
 	return NULL;
 }

-static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int mmp_tdma_terminate_all(struct dma_chan *chan)
 {
 	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
-	struct dma_slave_config *dmaengine_cfg = (void *)arg;
-	int ret = 0;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		mmp_tdma_disable_chan(tdmac);
-		/* disable interrupt */
-		mmp_tdma_enable_irq(tdmac, false);
-		break;
-	case DMA_PAUSE:
-		mmp_tdma_pause_chan(tdmac);
-		break;
-	case DMA_RESUME:
-		mmp_tdma_resume_chan(tdmac);
-		break;
-	case DMA_SLAVE_CONFIG:
-		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-			tdmac->dev_addr = dmaengine_cfg->src_addr;
-			tdmac->burst_sz = dmaengine_cfg->src_maxburst;
-			tdmac->buswidth = dmaengine_cfg->src_addr_width;
-		} else {
-			tdmac->dev_addr = dmaengine_cfg->dst_addr;
-			tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
-			tdmac->buswidth = dmaengine_cfg->dst_addr_width;
-		}
-		tdmac->dir = dmaengine_cfg->direction;
-		return mmp_tdma_config_chan(tdmac);
-	default:
-		ret = -ENOSYS;
+
+	mmp_tdma_disable_chan(chan);
+	/* disable interrupt */
+	mmp_tdma_enable_irq(tdmac, false);
+
+	return 0;
+}
+
+static int mmp_tdma_config(struct dma_chan *chan,
+			   struct dma_slave_config *dmaengine_cfg)
+{
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+		tdmac->dev_addr = dmaengine_cfg->src_addr;
+		tdmac->burst_sz = dmaengine_cfg->src_maxburst;
+		tdmac->buswidth = dmaengine_cfg->src_addr_width;
+	} else {
+		tdmac->dev_addr = dmaengine_cfg->dst_addr;
+		tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
+		tdmac->buswidth = dmaengine_cfg->dst_addr_width;
 	}
+	tdmac->dir = dmaengine_cfg->direction;
-	return ret;
+	return mmp_tdma_config_chan(chan);
 }

 static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
@@ -668,7 +672,10 @@ static int mmp_tdma_probe(struct platform_device *pdev)
 	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
 	tdev->device.device_tx_status = mmp_tdma_tx_status;
 	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
-	tdev->device.device_control = mmp_tdma_control;
+	tdev->device.device_config = mmp_tdma_config;
+	tdev->device.device_pause = mmp_tdma_pause_chan;
+	tdev->device.device_resume = mmp_tdma_resume_chan;
+	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
 	tdev->device.copy_align = TDMA_ALIGNMENT;

 	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));

+ 2 - 23
drivers/dma/moxart-dma.c

@@ -263,28 +263,6 @@ static int moxart_slave_config(struct dma_chan *chan,
 	return 0;
 }

-static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			  unsigned long arg)
-{
-	int ret = 0;
-
-	switch (cmd) {
-	case DMA_PAUSE:
-	case DMA_RESUME:
-		return -EINVAL;
-	case DMA_TERMINATE_ALL:
-		moxart_terminate_all(chan);
-		break;
-	case DMA_SLAVE_CONFIG:
-		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
-		break;
-	default:
-		ret = -ENOSYS;
-	}
-
-	return ret;
-}
-
 static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl,
 	unsigned int sg_len, enum dma_transfer_direction dir,
@@ -531,7 +509,8 @@ static void moxart_dma_init(struct dma_device *dma, struct device *dev)
 	dma->device_free_chan_resources		= moxart_free_chan_resources;
 	dma->device_issue_pending		= moxart_issue_pending;
 	dma->device_tx_status			= moxart_tx_status;
-	dma->device_control			= moxart_control;
+	dma->device_config			= moxart_slave_config;
+	dma->device_terminate_all		= moxart_terminate_all;
 	dma->dev				= dev;

 	INIT_LIST_HEAD(&dma->channels);

+ 51 - 60
drivers/dma/mpc512x_dma.c

@@ -800,79 +800,69 @@ err_prep:
 	return NULL;
 }

-static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-							unsigned long arg)
+static int mpc_dma_device_config(struct dma_chan *chan,
+				 struct dma_slave_config *cfg)
 {
-	struct mpc_dma_chan *mchan;
-	struct mpc_dma *mdma;
-	struct dma_slave_config *cfg;
+	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
 	unsigned long flags;

-	mchan = dma_chan_to_mpc_dma_chan(chan);
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		/* Disable channel requests */
-		mdma = dma_chan_to_mpc_dma(chan);
-
-		spin_lock_irqsave(&mchan->lock, flags);
-
-		out_8(&mdma->regs->dmacerq, chan->chan_id);
-		list_splice_tail_init(&mchan->prepared, &mchan->free);
-		list_splice_tail_init(&mchan->queued, &mchan->free);
-		list_splice_tail_init(&mchan->active, &mchan->free);
-
-		spin_unlock_irqrestore(&mchan->lock, flags);
+	/*
+	 * Software constraints:
+	 *  - only transfers between a peripheral device and
+	 *     memory are supported;
+	 *  - only peripheral devices with 4-byte FIFO access register
+	 *     are supported;
+	 *  - minimal transfer chunk is 4 bytes and consequently
+	 *     source and destination addresses must be 4-byte aligned
+	 *     and transfer size must be aligned on (4 * maxburst)
+	 *     boundary;
+	 *  - during the transfer RAM address is being incremented by
+	 *     the size of minimal transfer chunk;
+	 *  - peripheral port's address is constant during the transfer.
+	 */
-		return 0;
+	if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+	    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+	    !IS_ALIGNED(cfg->src_addr, 4) ||
+	    !IS_ALIGNED(cfg->dst_addr, 4)) {
+		return -EINVAL;
+	}
-	case DMA_SLAVE_CONFIG:
-		/*
-		 * Software constraints:
-		 *  - only transfers between a peripheral device and
-		 *     memory are supported;
-		 *  - only peripheral devices with 4-byte FIFO access register
-		 *     are supported;
-		 *  - minimal transfer chunk is 4 bytes and consequently
-		 *     source and destination addresses must be 4-byte aligned
-		 *     and transfer size must be aligned on (4 * maxburst)
-		 *     boundary;
-		 *  - during the transfer RAM address is being incremented by
-		 *     the size of minimal transfer chunk;
-		 *  - peripheral port's address is constant during the transfer.
-		 */
+	spin_lock_irqsave(&mchan->lock, flags);
-		cfg = (void *)arg;
+	mchan->src_per_paddr = cfg->src_addr;
+	mchan->src_tcd_nunits = cfg->src_maxburst;
+	mchan->dst_per_paddr = cfg->dst_addr;
+	mchan->dst_tcd_nunits = cfg->dst_maxburst;
-		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-		    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-		    !IS_ALIGNED(cfg->src_addr, 4) ||
-		    !IS_ALIGNED(cfg->dst_addr, 4)) {
-			return -EINVAL;
-		}
+	/* Apply defaults */
+	if (mchan->src_tcd_nunits == 0)
+		mchan->src_tcd_nunits = 1;
+	if (mchan->dst_tcd_nunits == 0)
+		mchan->dst_tcd_nunits = 1;
-		spin_lock_irqsave(&mchan->lock, flags);
+	spin_unlock_irqrestore(&mchan->lock, flags);
-		mchan->src_per_paddr = cfg->src_addr;
-		mchan->src_tcd_nunits = cfg->src_maxburst;
-		mchan->dst_per_paddr = cfg->dst_addr;
-		mchan->dst_tcd_nunits = cfg->dst_maxburst;
+	return 0;
+}
-		/* Apply defaults */
-		if (mchan->src_tcd_nunits == 0)
-			mchan->src_tcd_nunits = 1;
-		if (mchan->dst_tcd_nunits == 0)
-			mchan->dst_tcd_nunits = 1;
+static int mpc_dma_device_terminate_all(struct dma_chan *chan)
+{
+	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+	unsigned long flags;
-		spin_unlock_irqrestore(&mchan->lock, flags);
+	/* Disable channel requests */
+	spin_lock_irqsave(&mchan->lock, flags);
-		return 0;
+	out_8(&mdma->regs->dmacerq, chan->chan_id);
+	list_splice_tail_init(&mchan->prepared, &mchan->free);
+	list_splice_tail_init(&mchan->queued, &mchan->free);
+	list_splice_tail_init(&mchan->active, &mchan->free);
-	default:
-		/* Unknown command */
-		break;
-	}
+	spin_unlock_irqrestore(&mchan->lock, flags);
-	return -ENXIO;
+	return 0;
 }

 static int mpc_dma_probe(struct platform_device *op)
@@ -963,7 +953,8 @@ static int mpc_dma_probe(struct platform_device *op)
 	dma->device_tx_status = mpc_dma_tx_status;
 	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
 	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
-	dma->device_control = mpc_dma_device_control;
+	dma->device_config = mpc_dma_device_config;
+	dma->device_terminate_all = mpc_dma_device_terminate_all;

 	INIT_LIST_HEAD(&dma->channels);
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);

+ 0 - 9
drivers/dma/mv_xor.c

@@ -928,14 +928,6 @@ out:
 	return err;
 }

-/* This driver does not implement any of the optional DMA operations. */
-static int
-mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	       unsigned long arg)
-{
-	return -ENOSYS;
-}
-
 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
 {
 	struct dma_chan *chan, *_chan;
@@ -1008,7 +1000,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
 	dma_dev->device_tx_status = mv_xor_status;
 	dma_dev->device_issue_pending = mv_xor_issue_pending;
-	dma_dev->device_control = mv_xor_control;
 	dma_dev->dev = &pdev->dev;

 	/* set prep routines based on capability */

+ 28 - 37
drivers/dma/mxs-dma.c

@@ -202,8 +202,9 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
 	return container_of(chan, struct mxs_dma_chan, chan);
 }

-static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
+static void mxs_dma_reset_chan(struct dma_chan *chan)
 {
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;

@@ -250,8 +251,9 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 	mxs_chan->status = DMA_COMPLETE;
 }

-static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
+static void mxs_dma_enable_chan(struct dma_chan *chan)
 {
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;

@@ -272,13 +274,16 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 	mxs_chan->reset = false;
 }

-static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
+static void mxs_dma_disable_chan(struct dma_chan *chan)
 {
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+
 	mxs_chan->status = DMA_COMPLETE;
 }

-static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
+static int mxs_dma_pause_chan(struct dma_chan *chan)
 {
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;

@@ -291,10 +296,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
 			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

 	mxs_chan->status = DMA_PAUSED;
+	return 0;
 }

-static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
+static int mxs_dma_resume_chan(struct dma_chan *chan)
 {
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;

@@ -307,6 +314,7 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
 			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

 	mxs_chan->status = DMA_IN_PROGRESS;
+	return 0;
 }

 static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -383,7 +391,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
 			"%s: error in channel %d\n", __func__,
 			chan);
 		mxs_chan->status = DMA_ERROR;
-		mxs_dma_reset_chan(mxs_chan);
+		mxs_dma_reset_chan(&mxs_chan->chan);
 	} else if (mxs_chan->status != DMA_COMPLETE) {
 		if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
 			mxs_chan->status = DMA_IN_PROGRESS;
@@ -432,7 +440,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 	if (ret)
 		goto err_clk;

-	mxs_dma_reset_chan(mxs_chan);
+	mxs_dma_reset_chan(chan);

 	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
 	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -456,7 +464,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

-	mxs_dma_disable_chan(mxs_chan);
+	mxs_dma_disable_chan(chan);

 	free_irq(mxs_chan->chan_irq, mxs_dma);

@@ -651,28 +659,12 @@ err_out:
 	return NULL;
 }

-static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int mxs_dma_terminate_all(struct dma_chan *chan)
 {
-	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-	int ret = 0;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		mxs_dma_reset_chan(mxs_chan);
-		mxs_dma_disable_chan(mxs_chan);
-		break;
-	case DMA_PAUSE:
-		mxs_dma_pause_chan(mxs_chan);
-		break;
-	case DMA_RESUME:
-		mxs_dma_resume_chan(mxs_chan);
-		break;
-	default:
-		ret = -ENOSYS;
-	}
+	mxs_dma_reset_chan(chan);
+	mxs_dma_disable_chan(chan);
-	return ret;
+	return 0;
 }

 static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
@@ -701,13 +693,6 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
 	return mxs_chan->status;
 }

-static void mxs_dma_issue_pending(struct dma_chan *chan)
-{
-	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-
-	mxs_dma_enable_chan(mxs_chan);
-}
-
 static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
 {
 	int ret;
@@ -860,8 +845,14 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
 	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
 	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
 	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
-	mxs_dma->dma_device.device_control = mxs_dma_control;
-	mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;
+	mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
+	mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
+	mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
+	mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;

 	ret = dma_async_device_register(&mxs_dma->dma_device);
 	if (ret) {
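
mxs-dma now advertises its slave capabilities directly on struct dma_device (src/dst address widths, directions, residue granularity) instead of relying on a per-driver device_slave_caps() callback. A hedged client-side sketch of querying those capabilities generically; the helper name and the particular checks are illustrative:

#include <linux/dmaengine.h>

/* Returns true if the channel supports 4-byte DEV_TO_MEM transfers
 * and can be paused; any error from the query is treated as "no".
 */
static bool example_chan_fits(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps) < 0)
		return false;

	return caps.cmd_pause &&
	       (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
	       (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
}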

+ 51 - 61
drivers/dma/nbpfaxi.c

@@ -504,7 +504,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
 	 * pauses DMA and reads out data received via DMA as well as those left
 	 * in the Rx FIFO. For this to work with the RAM side using burst
 	 * transfers we enable the SBE bit and terminate the transfer in our
-	 * DMA_PAUSE handler.
+	 * .device_pause handler.
 	 */
 	mem_xfer = nbpf_xfer_ds(chan->nbpf, size);

@@ -565,13 +565,6 @@ static void nbpf_configure(struct nbpf_device *nbpf)
 	nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
 }

-static void nbpf_pause(struct nbpf_channel *chan)
-{
-	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
-	/* See comment in nbpf_prep_one() */
-	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
-}
-
 /*		Generic part			*/

 /* DMA ENGINE functions */
@@ -837,54 +830,58 @@ static void nbpf_chan_idle(struct nbpf_channel *chan)
 	}
 }

-static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-			unsigned long arg)
+static int nbpf_pause(struct dma_chan *dchan)
 {
 	struct nbpf_channel *chan = nbpf_to_chan(dchan);
-	struct dma_slave_config *config;
-	dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd);
+	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		dev_dbg(dchan->device->dev, "Terminating\n");
-		nbpf_chan_halt(chan);
-		nbpf_chan_idle(chan);
-		break;
+	chan->paused = true;
+	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
+	/* See comment in nbpf_prep_one() */
+	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
-	case DMA_SLAVE_CONFIG:
-		if (!arg)
-			return -EINVAL;
-		config = (struct dma_slave_config *)arg;
+	return 0;
 
-		/*
-		 * We could check config->slave_id to match chan->terminal here,
-		 * but with DT they would be coming from the same source, so
-		 * such a check would be superflous
-		 */
+static int nbpf_terminate_all(struct dma_chan *dchan)
+{
+	struct nbpf_channel *chan = nbpf_to_chan(dchan);
 
 
-		chan->slave_dst_addr = config->dst_addr;
-		chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
-						       config->dst_addr_width, 1);
-		chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
-						       config->dst_addr_width,
-						       config->dst_maxburst);
-		chan->slave_src_addr = config->src_addr;
-		chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
-						       config->src_addr_width, 1);
-		chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
-						       config->src_addr_width,
-						       config->src_maxburst);
-		break;
+	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
 
-	case DMA_PAUSE:
-		chan->paused = true;
-		nbpf_pause(chan);
-		break;
+	nbpf_chan_halt(chan);
+	nbpf_chan_idle(chan);
-	default:
-		return -ENXIO;
-	}
+	return 0;
+}
+
+static int nbpf_config(struct dma_chan *dchan,
+		       struct dma_slave_config *config)
+{
+	struct nbpf_channel *chan = nbpf_to_chan(dchan);
+
+	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
+
+	/*
+	 * We could check config->slave_id to match chan->terminal here,
+	 * but with DT they would be coming from the same source, so
+	 * such a check would be superflous
+	 */
+
+	chan->slave_dst_addr = config->dst_addr;
+	chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
+					       config->dst_addr_width, 1);
+	chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
+					       config->dst_addr_width,
+					       config->dst_maxburst);
+	chan->slave_src_addr = config->src_addr;
+	chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
+					       config->src_addr_width, 1);
+	chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
+					       config->src_addr_width,
+					       config->src_maxburst);

 	return 0;
 }
@@ -1072,18 +1069,6 @@ static void nbpf_free_chan_resources(struct dma_chan *dchan)
 	}
 }

-static int nbpf_slave_caps(struct dma_chan *dchan,
-			   struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = NBPF_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = false;
-	caps->cmd_terminate = true;
-
-	return 0;
-}
-
 static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
 				      struct of_dma *ofdma)
 {
@@ -1414,7 +1399,6 @@ static int nbpf_probe(struct platform_device *pdev)
 	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
 	dma_dev->device_tx_status = nbpf_tx_status;
 	dma_dev->device_issue_pending = nbpf_issue_pending;
-	dma_dev->device_slave_caps = nbpf_slave_caps;

 	/*
 	 * If we drop support for unaligned MEMCPY buffer addresses and / or
@@ -1426,7 +1410,13 @@ static int nbpf_probe(struct platform_device *pdev)

 	/* Compulsory for DMA_SLAVE fields */
 	dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
-	dma_dev->device_control = nbpf_control;
+	dma_dev->device_config = nbpf_config;
+	dma_dev->device_pause = nbpf_pause;
+	dma_dev->device_terminate_all = nbpf_terminate_all;
+
+	dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
+	dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
+	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

 	platform_set_drvdata(pdev, nbpf);


+ 4 - 0
drivers/dma/of-dma.c

@@ -159,6 +159,10 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
 		return ERR_PTR(-ENODEV);
 	}

+	/* Silently fail if there is not even the "dmas" property */
+	if (!of_find_property(np, "dmas", NULL))
+		return ERR_PTR(-ENODEV);
+
 	count = of_property_count_strings(np, "dma-names");
 	if (count < 0) {
 		pr_err("%s: dma-names property of node '%s' missing or empty\n",
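
The added check makes of_dma_request_slave_channel() return -ENODEV quietly when a node has no "dmas" property at all, instead of logging an error about a missing "dma-names". A hedged sketch of the usual client call path that ends up here, assuming the ERR_PTR-returning request helper; the channel name "rx" is illustrative:

#include <linux/dmaengine.h>

static struct dma_chan *example_get_rx_chan(struct device *dev)
{
	struct dma_chan *chan;

	/* ERR_PTR-returning variant, so -EPROBE_DEFER can be told apart
	 * from "this device simply has no DMA described".
	 */
	chan = dma_request_slave_channel_reason(dev, "rx");
	if (PTR_ERR(chan) == -ENODEV)
		return NULL;	/* run without DMA */

	return chan;		/* valid channel or other ERR_PTR */
}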

+ 19 - 50
drivers/dma/omap-dma.c

@@ -948,8 +948,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
 	return vchan_tx_prep(&c->vc, &d->vd, flags);
 }

-static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
+static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
 {
+	struct omap_chan *c = to_omap_dma_chan(chan);
+
 	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
 	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 		return -EINVAL;
@@ -959,8 +961,9 @@ static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *c
 	return 0;
 }

-static int omap_dma_terminate_all(struct omap_chan *c)
+static int omap_dma_terminate_all(struct dma_chan *chan)
 {
+	struct omap_chan *c = to_omap_dma_chan(chan);
 	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
 	unsigned long flags;
 	LIST_HEAD(head);
@@ -996,8 +999,10 @@ static int omap_dma_terminate_all(struct omap_chan *c)
 	return 0;
 }

-static int omap_dma_pause(struct omap_chan *c)
+static int omap_dma_pause(struct dma_chan *chan)
 {
+	struct omap_chan *c = to_omap_dma_chan(chan);
+
 	/* Pause/Resume only allowed with cyclic mode */
 	if (!c->cyclic)
 		return -EINVAL;
@@ -1010,8 +1015,10 @@ static int omap_dma_pause(struct omap_chan *c)
 	return 0;
 }

-static int omap_dma_resume(struct omap_chan *c)
+static int omap_dma_resume(struct dma_chan *chan)
 {
+	struct omap_chan *c = to_omap_dma_chan(chan);
+
 	/* Pause/Resume only allowed with cyclic mode */
 	if (!c->cyclic)
 		return -EINVAL;
@@ -1029,37 +1036,6 @@ static int omap_dma_resume(struct omap_chan *c)
 	return 0;
 }

-static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct omap_chan *c = to_omap_dma_chan(chan);
-	int ret;
-
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
-		break;
-
-	case DMA_TERMINATE_ALL:
-		ret = omap_dma_terminate_all(c);
-		break;
-
-	case DMA_PAUSE:
-		ret = omap_dma_pause(c);
-		break;
-
-	case DMA_RESUME:
-		ret = omap_dma_resume(c);
-		break;
-
-	default:
-		ret = -ENXIO;
-		break;
-	}
-
-	return ret;
-}
-
 static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
 {
 	struct omap_chan *c;
@@ -1094,19 +1070,6 @@ static void omap_dma_free(struct omap_dmadev *od)
 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

-static int omap_dma_device_slave_caps(struct dma_chan *dchan,
-				      struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-	return 0;
-}
-
 static int omap_dma_probe(struct platform_device *pdev)
 {
 	struct omap_dmadev *od;
@@ -1136,8 +1099,14 @@ static int omap_dma_probe(struct platform_device *pdev)
 	od->ddev.device_issue_pending = omap_dma_issue_pending;
 	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
 	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
-	od->ddev.device_control = omap_dma_control;
-	od->ddev.device_slave_caps = omap_dma_device_slave_caps;
+	od->ddev.device_config = omap_dma_slave_config;
+	od->ddev.device_pause = omap_dma_pause;
+	od->ddev.device_resume = omap_dma_resume;
+	od->ddev.device_terminate_all = omap_dma_terminate_all;
+	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
+	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
+	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
 	INIT_LIST_HEAD(&od->pending);

+ 2 - 6
drivers/dma/pch_dma.c

@@ -665,16 +665,12 @@ err_desc_get:
 	return NULL;
 }

-static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			     unsigned long arg)
+static int pd_device_terminate_all(struct dma_chan *chan)
 {
 	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 	struct pch_dma_desc *desc, *_d;
 	LIST_HEAD(list);

-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
 	spin_lock_irq(&pd_chan->lock);

 	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
@@ -932,7 +928,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
 	pd->dma.device_tx_status = pd_tx_status;
 	pd->dma.device_issue_pending = pd_issue_pending;
 	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
-	pd->dma.device_control = pd_device_control;
+	pd->dma.device_terminate_all = pd_device_terminate_all;

 	err = dma_async_device_register(&pd->dma);
 	if (err) {

+ 155 - 75
drivers/dma/pl330.c

@@ -504,6 +504,9 @@ struct dma_pl330_desc {

 	enum desc_status status;

+	int bytes_requested;
+	bool last;
+
 	/* The channel which currently holds this desc */
 	struct dma_pl330_chan *pchan;

@@ -1048,6 +1051,10 @@ static bool _trigger(struct pl330_thread *thrd)
 	if (!req)
 		return true;

+	/* Return if req is running */
+	if (idx == thrd->req_running)
+		return true;
+
 	desc = req->desc;

 	ns = desc->rqcfg.nonsecure ? 1 : 0;
@@ -1587,6 +1594,8 @@ static int pl330_update(struct pl330_dmac *pl330)
 			descdone = thrd->req[active].desc;
 			thrd->req[active].desc = NULL;

+			thrd->req_running = -1;
+
 			/* Get going again ASAP */
 			_start(thrd);

@@ -2086,77 +2095,89 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 	return 1;
 }

-static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
+static int pl330_config(struct dma_chan *chan,
+			struct dma_slave_config *slave_config)
+{
+	struct dma_pl330_chan *pch = to_pchan(chan);
+
+	if (slave_config->direction == DMA_MEM_TO_DEV) {
+		if (slave_config->dst_addr)
+			pch->fifo_addr = slave_config->dst_addr;
+		if (slave_config->dst_addr_width)
+			pch->burst_sz = __ffs(slave_config->dst_addr_width);
+		if (slave_config->dst_maxburst)
+			pch->burst_len = slave_config->dst_maxburst;
+	} else if (slave_config->direction == DMA_DEV_TO_MEM) {
+		if (slave_config->src_addr)
+			pch->fifo_addr = slave_config->src_addr;
+		if (slave_config->src_addr_width)
+			pch->burst_sz = __ffs(slave_config->src_addr_width);
+		if (slave_config->src_maxburst)
+			pch->burst_len = slave_config->src_maxburst;
+	}
+
+	return 0;
+}
+
+static int pl330_terminate_all(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
 	struct dma_pl330_desc *desc;
 	unsigned long flags;
 	struct pl330_dmac *pl330 = pch->dmac;
-	struct dma_slave_config *slave_config;
 	LIST_HEAD(list);

-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		pm_runtime_get_sync(pl330->ddma.dev);
-		spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock(&pl330->lock);
+	_stop(pch->thread);
+	spin_unlock(&pl330->lock);
+
+	pch->thread->req[0].desc = NULL;
+	pch->thread->req[1].desc = NULL;
+	pch->thread->req_running = -1;
+
+	/* Mark all desc done */
+	list_for_each_entry(desc, &pch->submitted_list, node) {
+		desc->status = FREE;
+		dma_cookie_complete(&desc->txd);
+	}
-		spin_lock(&pl330->lock);
-		_stop(pch->thread);
-		spin_unlock(&pl330->lock);
+	list_for_each_entry(desc, &pch->work_list , node) {
+		desc->status = FREE;
+		dma_cookie_complete(&desc->txd);
+	}
-		pch->thread->req[0].desc = NULL;
-		pch->thread->req[1].desc = NULL;
-		pch->thread->req_running = -1;
+	list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
+	list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
+	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
+	spin_unlock_irqrestore(&pch->lock, flags);
-		/* Mark all desc done */
-		list_for_each_entry(desc, &pch->submitted_list, node) {
-			desc->status = FREE;
-			dma_cookie_complete(&desc->txd);
-		}
+	return 0;
+}
-		list_for_each_entry(desc, &pch->work_list , node) {
-			desc->status = FREE;
-			dma_cookie_complete(&desc->txd);
-		}
+/*
+ * We don't support DMA_RESUME command because of hardware
+ * limitations, so after pausing the channel we cannot restore
+ * it to active state. We have to terminate channel and setup
+ * DMA transfer again. This pause feature was implemented to
+ * allow safely read residue before channel termination.
+ */
+int pl330_pause(struct dma_chan *chan)
+{
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct pl330_dmac *pl330 = pch->dmac;
+	unsigned long flags;
-		list_for_each_entry(desc, &pch->completed_list , node) {
-			desc->status = FREE;
-			dma_cookie_complete(&desc->txd);
-		}
+	pm_runtime_get_sync(pl330->ddma.dev);
+	spin_lock_irqsave(&pch->lock, flags);
-		if (!list_empty(&pch->work_list))
-			pm_runtime_put(pl330->ddma.dev);
+	spin_lock(&pl330->lock);
+	_stop(pch->thread);
+	spin_unlock(&pl330->lock);
-		list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
-		list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
-		list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
-		spin_unlock_irqrestore(&pch->lock, flags);
-		pm_runtime_mark_last_busy(pl330->ddma.dev);
-		pm_runtime_put_autosuspend(pl330->ddma.dev);
-		break;
-	case DMA_SLAVE_CONFIG:
-		slave_config = (struct dma_slave_config *)arg;
-
-		if (slave_config->direction == DMA_MEM_TO_DEV) {
-			if (slave_config->dst_addr)
-				pch->fifo_addr = slave_config->dst_addr;
-			if (slave_config->dst_addr_width)
-				pch->burst_sz = __ffs(slave_config->dst_addr_width);
-			if (slave_config->dst_maxburst)
-				pch->burst_len = slave_config->dst_maxburst;
-		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
-			if (slave_config->src_addr)
-				pch->fifo_addr = slave_config->src_addr;
-			if (slave_config->src_addr_width)
-				pch->burst_sz = __ffs(slave_config->src_addr_width);
-			if (slave_config->src_maxburst)
-				pch->burst_len = slave_config->src_maxburst;
-		}
-		break;
-	default:
-		dev_err(pch->dmac->ddma.dev, "Not supported command.\n");
-		return -ENXIO;
-	}
+	spin_unlock_irqrestore(&pch->lock, flags);
+	pm_runtime_mark_last_busy(pl330->ddma.dev);
+	pm_runtime_put_autosuspend(pl330->ddma.dev);

 	return 0;
 }
@@ -2182,11 +2203,74 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }

+int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
+		struct dma_pl330_desc *desc)
+{
+	struct pl330_thread *thrd = pch->thread;
+	struct pl330_dmac *pl330 = pch->dmac;
+	void __iomem *regs = thrd->dmac->base;
+	u32 val, addr;
+
+	pm_runtime_get_sync(pl330->ddma.dev);
+	val = addr = 0;
+	if (desc->rqcfg.src_inc) {
+		val = readl(regs + SA(thrd->id));
+		addr = desc->px.src_addr;
+	} else {
+		val = readl(regs + DA(thrd->id));
+		addr = desc->px.dst_addr;
+	}
+	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+	pm_runtime_put_autosuspend(pl330->ddma.dev);
+	return val - addr;
+}
+
 static enum dma_status
 pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		 struct dma_tx_state *txstate)
 {
-	return dma_cookie_status(chan, cookie, txstate);
+	enum dma_status ret;
+	unsigned long flags;
+	struct dma_pl330_desc *desc, *running = NULL;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	unsigned int transferred, residual = 0;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	if (!txstate)
+		return ret;
+
+	if (ret == DMA_COMPLETE)
+		goto out;
+
+	spin_lock_irqsave(&pch->lock, flags);
+
+	if (pch->thread->req_running != -1)
+		running = pch->thread->req[pch->thread->req_running].desc;
+
+	/* Check in pending list */
+	list_for_each_entry(desc, &pch->work_list, node) {
+		if (desc->status == DONE)
+			transferred = desc->bytes_requested;
+		else if (running && desc == running)
+			transferred =
+				pl330_get_current_xferred_count(pch, desc);
+		else
+			transferred = 0;
+		residual += desc->bytes_requested - transferred;
+		if (desc->txd.cookie == cookie) {
+			ret = desc->status;
+			break;
+		}
+		if (desc->last)
+			residual = 0;
+	}
+	spin_unlock_irqrestore(&pch->lock, flags);
+
+out:
+	dma_set_residue(txstate, residual);
+
+	return ret;
 }

 static void pl330_issue_pending(struct dma_chan *chan)
@@ -2231,12 +2315,14 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 			desc->txd.callback = last->txd.callback;
 			desc->txd.callback_param = last->txd.callback_param;
 		}
+		last->last = false;

 		dma_cookie_assign(&desc->txd);

 		list_move_tail(&desc->node, &pch->submitted_list);
 	}

+	last->last = true;
 	cookie = dma_cookie_assign(&last->txd);
 	list_add_tail(&last->node, &pch->submitted_list);
 	spin_unlock_irqrestore(&pch->lock, flags);
@@ -2459,6 +2545,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 		desc->rqtype = direction;
 		desc->rqcfg.brst_size = pch->burst_sz;
 		desc->rqcfg.brst_len = 1;
+		desc->bytes_requested = period_len;
 		fill_px(&desc->px, dst, src, period_len);

 		if (!first)
@@ -2601,6 +2688,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		desc->rqcfg.brst_size = pch->burst_sz;
 		desc->rqcfg.brst_len = 1;
 		desc->rqtype = direction;
+		desc->bytes_requested = sg_dma_len(sg);
 	}

 	/* Return the last desc in the chain */
@@ -2623,19 +2711,6 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
 	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)

-static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
-	struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = false;
-	caps->cmd_terminate = true;
-	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
-
-	return 0;
-}
-
 /*
  * Runtime PM callbacks are provided by amba/bus.c driver.
  *
@@ -2793,9 +2868,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
 	pd->device_tx_status = pl330_tx_status;
 	pd->device_prep_slave_sg = pl330_prep_slave_sg;
-	pd->device_control = pl330_control;
+	pd->device_config = pl330_config;
+	pd->device_pause = pl330_pause;
+	pd->device_terminate_all = pl330_terminate_all;
 	pd->device_issue_pending = pl330_issue_pending;
-	pd->device_slave_caps = pl330_dma_device_slave_caps;
+	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
+	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
+	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

 	ret = dma_async_device_register(pd);
 	if (ret) {
@@ -2847,7 +2927,7 @@ probe_err3:

 		/* Flush the channel */
 		if (pch->thread) {
-			pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+			pl330_terminate_all(&pch->chan);
 			pl330_free_chan_resources(&pch->chan);
 		}
 	}
@@ -2878,7 +2958,7 @@ static int pl330_remove(struct amba_device *adev)

 		/* Flush the channel */
 		if (pch->thread) {
-			pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+			pl330_terminate_all(&pch->chan);
 			pl330_free_chan_resources(&pch->chan);
 		}
 	}
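
pl330 now reports per-descriptor residue and implements pause but, as the comment in the driver explains, not resume. A hedged client-side sketch of the intended use, pause then read the residue then terminate; the cookie is assumed to come from an earlier dmaengine_submit() on this channel:

#include <linux/dmaengine.h>

static size_t example_stop_and_get_residue(struct dma_chan *chan,
					   dma_cookie_t cookie)
{
	struct dma_tx_state state;

	/* Freeze the transfer so the residue cannot move under us. */
	if (dmaengine_pause(chan))
		pr_debug("pause not supported, residue may be stale\n");

	dmaengine_tx_status(chan, cookie, &state);

	/* pl330 cannot resume a paused channel; terminate and re-submit
	 * if the transfer should continue.
	 */
	dmaengine_terminate_all(chan);

	return state.residue;
}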

+ 43 - 42
drivers/dma/qcom_bam_dma.c

@@ -530,11 +530,18 @@ static void bam_free_chan(struct dma_chan *chan)
  * Sets slave configuration for channel
  *
  */
-static void bam_slave_config(struct bam_chan *bchan,
-		struct dma_slave_config *cfg)
+static int bam_slave_config(struct dma_chan *chan,
+			    struct dma_slave_config *cfg)
 {
+	struct bam_chan *bchan = to_bam_chan(chan);
+	unsigned long flag;
+
+	spin_lock_irqsave(&bchan->vc.lock, flag);
 	memcpy(&bchan->slave, cfg, sizeof(*cfg));
 	bchan->reconfigure = 1;
+	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+
+	return 0;
 }

 /**
@@ -627,8 +634,9 @@ err_out:
  * No callbacks are done
  *
  */
-static void bam_dma_terminate_all(struct bam_chan *bchan)
+static int bam_dma_terminate_all(struct dma_chan *chan)
 {
+	struct bam_chan *bchan = to_bam_chan(chan);
 	unsigned long flag;
 	LIST_HEAD(head);

@@ -643,56 +651,46 @@ static void bam_dma_terminate_all(struct bam_chan *bchan)
 	spin_unlock_irqrestore(&bchan->vc.lock, flag);

 	vchan_dma_desc_free_list(&bchan->vc, &head);
+
+	return 0;
 }

 /**
- * bam_control - DMA device control
+ * bam_pause - Pause DMA channel
  * @chan: dma channel
- * @cmd: control cmd
- * @arg: cmd argument
  *
- * Perform DMA control command
+ */
+static int bam_pause(struct dma_chan *chan)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+	unsigned long flag;
+
+	spin_lock_irqsave(&bchan->vc.lock, flag);
+	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
+	bchan->paused = 1;
+	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+
+	return 0;
+}
+
+/**
+ * bam_resume - Resume DMA channel operations
+ * @chan: dma channel
  *
  */
-static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
+static int bam_resume(struct dma_chan *chan)
 {
 	struct bam_chan *bchan = to_bam_chan(chan);
 	struct bam_device *bdev = bchan->bdev;
-	int ret = 0;
 	unsigned long flag;

-	switch (cmd) {
-	case DMA_PAUSE:
-		spin_lock_irqsave(&bchan->vc.lock, flag);
-		writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
-		bchan->paused = 1;
-		spin_unlock_irqrestore(&bchan->vc.lock, flag);
-		break;
-
-	case DMA_RESUME:
-		spin_lock_irqsave(&bchan->vc.lock, flag);
-		writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
-		bchan->paused = 0;
-		spin_unlock_irqrestore(&bchan->vc.lock, flag);
-		break;
-
-	case DMA_TERMINATE_ALL:
-		bam_dma_terminate_all(bchan);
-		break;
-
-	case DMA_SLAVE_CONFIG:
-		spin_lock_irqsave(&bchan->vc.lock, flag);
-		bam_slave_config(bchan, (struct dma_slave_config *)arg);
-		spin_unlock_irqrestore(&bchan->vc.lock, flag);
-		break;
-
-	default:
-		ret = -ENXIO;
-		break;
-	}
+	spin_lock_irqsave(&bchan->vc.lock, flag);
+	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
+	bchan->paused = 0;
+	spin_unlock_irqrestore(&bchan->vc.lock, flag);
-	return ret;
+	return 0;
 }

 /**
@@ -1148,7 +1146,10 @@ static int bam_dma_probe(struct platform_device *pdev)
 	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
 	bdev->common.device_free_chan_resources = bam_free_chan;
 	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
-	bdev->common.device_control = bam_control;
+	bdev->common.device_config = bam_slave_config;
+	bdev->common.device_pause = bam_pause;
+	bdev->common.device_resume = bam_resume;
+	bdev->common.device_terminate_all = bam_dma_terminate_all;
 	bdev->common.device_issue_pending = bam_issue_pending;
 	bdev->common.device_tx_status = bam_tx_status;
 	bdev->common.dev = bdev->dev;
@@ -1187,7 +1188,7 @@ static int bam_dma_remove(struct platform_device *pdev)
 	devm_free_irq(bdev->dev, bdev->irq, bdev);

 	for (i = 0; i < bdev->num_channels; i++) {
-		bam_dma_terminate_all(&bdev->channels[i]);
+		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
 		tasklet_kill(&bdev->channels[i].vc.task);

 		dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
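
bam_dma_terminate_all() above follows the usual shape for virt-dma based drivers: stop the hardware under the vc lock, collect all descriptors, then free them outside the lock. A hedged, generic sketch of that pattern (not taken from the driver; the hardware stop is elided):

#include "virt-dma.h"

static int example_vchan_terminate_all(struct virt_dma_chan *vc)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	/* ...stop the hardware channel here... */
	vchan_get_all_descriptors(vc, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* Callbacks are not run; descriptors are simply freed. */
	vchan_dma_desc_free_list(vc, &head);

	return 0;
}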

+ 36 - 37
drivers/dma/s3c24xx-dma.c

@@ -384,20 +384,30 @@ static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
 	return tc * txd->width;
 }

-static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan,
+static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
 				  struct dma_slave_config *config)
 {
-	if (!s3cchan->slave)
-		return -EINVAL;
+	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+	unsigned long flags;
+	int ret = 0;

 	/* Reject definitely invalid configurations */
 	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
 	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 		return -EINVAL;

+	spin_lock_irqsave(&s3cchan->vc.lock, flags);
+
+	if (!s3cchan->slave) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	s3cchan->cfg = *config;

-	return 0;
+out:
+	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+	return ret;
 }

 /*
@@ -703,8 +713,7 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
  * The DMA ENGINE API
  */

-static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			 unsigned long arg)
+static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
 {
 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
@@ -713,40 +722,28 @@ static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,

 	spin_lock_irqsave(&s3cchan->vc.lock, flags);

-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		ret = s3c24xx_dma_set_runtime_config(s3cchan,
-					      (struct dma_slave_config *)arg);
-		break;
-	case DMA_TERMINATE_ALL:
-		if (!s3cchan->phy && !s3cchan->at) {
-			dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
-				s3cchan->id);
-			ret = -EINVAL;
-			break;
-		}
+	if (!s3cchan->phy && !s3cchan->at) {
+		dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
+			s3cchan->id);
+		ret = -EINVAL;
+		goto unlock;
+	}
-		s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
+	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
-		 /* Mark physical channel as free */
-		if (s3cchan->phy)
-			s3c24xx_dma_phy_free(s3cchan);
+	/* Mark physical channel as free */
+	if (s3cchan->phy)
+		s3c24xx_dma_phy_free(s3cchan);
-		/* Dequeue current job */
-		if (s3cchan->at) {
-			s3c24xx_dma_desc_free(&s3cchan->at->vd);
-			s3cchan->at = NULL;
-		}
-
-		/* Dequeue jobs not yet fired as well */
-		s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
-		break;
-	default:
-		/* Unknown command */
-		ret = -ENXIO;
-		break;
+	/* Dequeue current job */
+	if (s3cchan->at) {
+		s3c24xx_dma_desc_free(&s3cchan->at->vd);
+		s3cchan->at = NULL;
 	}

+	/* Dequeue jobs not yet fired as well */
+	s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+unlock:
 	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

 	return ret;
@@ -1300,7 +1297,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
 	s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
 	s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
 	s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
-	s3cdma->memcpy.device_control = s3c24xx_dma_control;
+	s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
+	s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;

 	/* Initialize slave engine for SoC internal dedicated peripherals */
 	dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1315,7 +1313,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
 	s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
 	s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
 	s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
-	s3cdma->slave.device_control = s3c24xx_dma_control;
+	s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
+	s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;

 	/* Register as many memcpy channels as there are physical channels */
 	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,

+ 82 - 75
drivers/dma/sa11x0-dma.c

@@ -669,8 +669,10 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
 	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 }
 
-static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
+static int sa11x0_dma_device_config(struct dma_chan *chan,
+				    struct dma_slave_config *cfg)
 {
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
 	dma_addr_t addr;
 	enum dma_slave_buswidth width;
@@ -704,99 +706,101 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
 	return 0;
 }
 
-static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
+static int sa11x0_dma_device_pause(struct dma_chan *chan)
 {
 	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 	struct sa11x0_dma_phy *p;
 	LIST_HEAD(head);
 	unsigned long flags;
-	int ret;
 
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
-
-	case DMA_TERMINATE_ALL:
-		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
-		/* Clear the tx descriptor lists */
-		spin_lock_irqsave(&c->vc.lock, flags);
-		vchan_get_all_descriptors(&c->vc, &head);
+	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (c->status == DMA_IN_PROGRESS) {
+		c->status = DMA_PAUSED;
 
 		p = c->phy;
 		if (p) {
-			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
-			/* vchan is assigned to a pchan - stop the channel */
-			writel(DCSR_RUN | DCSR_IE |
-				DCSR_STRTA | DCSR_DONEA |
-				DCSR_STRTB | DCSR_DONEB,
-				p->base + DMA_DCSR_C);
-
-			if (p->txd_load) {
-				if (p->txd_load != p->txd_done)
-					list_add_tail(&p->txd_load->vd.node, &head);
-				p->txd_load = NULL;
-			}
-			if (p->txd_done) {
-				list_add_tail(&p->txd_done->vd.node, &head);
-				p->txd_done = NULL;
-			}
-			c->phy = NULL;
+			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
+		} else {
 			spin_lock(&d->lock);
-			p->vchan = NULL;
+			list_del_init(&c->node);
 			spin_unlock(&d->lock);
-			tasklet_schedule(&d->task);
 		}
-		spin_unlock_irqrestore(&c->vc.lock, flags);
-		vchan_dma_desc_free_list(&c->vc, &head);
-		ret = 0;
-		break;
+	}
 
 
-	case DMA_PAUSE:
-		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
-		spin_lock_irqsave(&c->vc.lock, flags);
-		if (c->status == DMA_IN_PROGRESS) {
-			c->status = DMA_PAUSED;
+	return 0;
+}
 
-			p = c->phy;
-			if (p) {
-				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
-			} else {
-				spin_lock(&d->lock);
-				list_del_init(&c->node);
-				spin_unlock(&d->lock);
-			}
-		}
-		spin_unlock_irqrestore(&c->vc.lock, flags);
-		ret = 0;
-		break;
+static int sa11x0_dma_device_resume(struct dma_chan *chan)
+{
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+	struct sa11x0_dma_phy *p;
+	LIST_HEAD(head);
+	unsigned long flags;
 
-	case DMA_RESUME:
-		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
-		spin_lock_irqsave(&c->vc.lock, flags);
-		if (c->status == DMA_PAUSED) {
-			c->status = DMA_IN_PROGRESS;
-
-			p = c->phy;
-			if (p) {
-				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
-			} else if (!list_empty(&c->vc.desc_issued)) {
-				spin_lock(&d->lock);
-				list_add_tail(&c->node, &d->chan_pending);
-				spin_unlock(&d->lock);
-			}
+	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (c->status == DMA_PAUSED) {
+		c->status = DMA_IN_PROGRESS;
+
+		p = c->phy;
+		if (p) {
+			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
+		} else if (!list_empty(&c->vc.desc_issued)) {
+			spin_lock(&d->lock);
+			list_add_tail(&c->node, &d->chan_pending);
+			spin_unlock(&d->lock);
 		}
-		spin_unlock_irqrestore(&c->vc.lock, flags);
-		ret = 0;
-		break;
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
 
 
-	default:
-		ret = -ENXIO;
-		break;
+	return 0;
+}
+
+static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
+{
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+	struct sa11x0_dma_phy *p;
+	LIST_HEAD(head);
+	unsigned long flags;
+
+	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+	/* Clear the tx descriptor lists */
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_get_all_descriptors(&c->vc, &head);
+
+	p = c->phy;
+	if (p) {
+		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
+		/* vchan is assigned to a pchan - stop the channel */
+		writel(DCSR_RUN | DCSR_IE |
+		       DCSR_STRTA | DCSR_DONEA |
+		       DCSR_STRTB | DCSR_DONEB,
+		       p->base + DMA_DCSR_C);
+
+		if (p->txd_load) {
+			if (p->txd_load != p->txd_done)
+				list_add_tail(&p->txd_load->vd.node, &head);
+			p->txd_load = NULL;
+		}
+		if (p->txd_done) {
+			list_add_tail(&p->txd_done->vd.node, &head);
+			p->txd_done = NULL;
+		}
+		c->phy = NULL;
+		spin_lock(&d->lock);
+		p->vchan = NULL;
+		spin_unlock(&d->lock);
+		tasklet_schedule(&d->task);
 	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
 
-	return ret;
+	return 0;
 }
 
 struct sa11x0_dma_channel_desc {
@@ -833,7 +837,10 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
 	dmadev->dev = dev;
 	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
 	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
-	dmadev->device_control = sa11x0_dma_control;
+	dmadev->device_config = sa11x0_dma_device_config;
+	dmadev->device_pause = sa11x0_dma_device_pause;
+	dmadev->device_resume = sa11x0_dma_device_resume;
+	dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
 	dmadev->device_tx_status = sa11x0_dma_tx_status;
 	dmadev->device_issue_pending = sa11x0_dma_issue_pending;
 

+ 13 - 1
drivers/dma/sh/Kconfig

@@ -2,6 +2,10 @@
 # DMA engine configuration for sh
 #
 
+config RENESAS_DMA
+	bool
+	select DMA_ENGINE
+
 #
 # DMA Engine Helpers
 #
@@ -12,7 +16,7 @@ config SH_DMAE_BASE
 	depends on !SUPERH || SH_DMA
 	depends on !SH_DMA_API
 	default y
-	select DMA_ENGINE
+	select RENESAS_DMA
 	help
 	  Enable support for the Renesas SuperH DMA controllers.
 
@@ -52,3 +56,11 @@ config RCAR_AUDMAC_PP
 	depends on SH_DMAE_BASE
 	help
 	  Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
+
+config RCAR_DMAC
+	tristate "Renesas R-Car Gen2 DMA Controller"
+	depends on ARCH_SHMOBILE || COMPILE_TEST
+	select RENESAS_DMA
+	help
+	  This driver supports the general purpose DMA controller found in the
+	  Renesas R-Car second generation SoCs.

+ 1 - 0
drivers/dma/sh/Makefile

@@ -16,3 +16,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_SUDMAC) += sudmac.o
 obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
+obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o

+ 1770 - 0
drivers/dma/sh/rcar-dmac.c

@@ -0,0 +1,1770 @@
+/*
+ * Renesas R-Car Gen2 DMA Controller Driver
+ *
+ * Copyright (C) 2014 Renesas Electronics Inc.
+ *
+ * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+
+/*
+ * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
+ * @node: entry in the parent's chunks list
+ * @src_addr: device source address
+ * @dst_addr: device destination address
+ * @size: transfer size in bytes
+ */
+struct rcar_dmac_xfer_chunk {
+	struct list_head node;
+
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	u32 size;
+};
+
+/*
+ * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
+ * @sar: value of the SAR register (source address)
+ * @dar: value of the DAR register (destination address)
+ * @tcr: value of the TCR register (transfer count)
+ */
+struct rcar_dmac_hw_desc {
+	u32 sar;
+	u32 dar;
+	u32 tcr;
+	u32 reserved;
+} __attribute__((__packed__));
+
+/*
+ * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
+ * @async_tx: base DMA asynchronous transaction descriptor
+ * @direction: direction of the DMA transfer
+ * @xfer_shift: log2 of the transfer size
+ * @chcr: value of the channel configuration register for this transfer
+ * @node: entry in the channel's descriptors lists
+ * @chunks: list of transfer chunks for this transfer
+ * @running: the transfer chunk being currently processed
+ * @nchunks: number of transfer chunks for this transfer
+ * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
+ * @hwdescs.mem: hardware descriptors memory for the transfer
+ * @hwdescs.dma: device address of the hardware descriptors memory
+ * @hwdescs.size: size of the hardware descriptors in bytes
+ * @size: transfer size in bytes
+ * @cyclic: when set indicates that the DMA transfer is cyclic
+ */
+struct rcar_dmac_desc {
+	struct dma_async_tx_descriptor async_tx;
+	enum dma_transfer_direction direction;
+	unsigned int xfer_shift;
+	u32 chcr;
+
+	struct list_head node;
+	struct list_head chunks;
+	struct rcar_dmac_xfer_chunk *running;
+	unsigned int nchunks;
+
+	struct {
+		bool use;
+		struct rcar_dmac_hw_desc *mem;
+		dma_addr_t dma;
+		size_t size;
+	} hwdescs;
+
+	unsigned int size;
+	bool cyclic;
+};
+
+#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)
+
+/*
+ * struct rcar_dmac_desc_page - One page worth of descriptors
+ * @node: entry in the channel's pages list
+ * @descs: array of DMA descriptors
+ * @chunks: array of transfer chunk descriptors
+ */
+struct rcar_dmac_desc_page {
+	struct list_head node;
+
+	union {
+		struct rcar_dmac_desc descs[0];
+		struct rcar_dmac_xfer_chunk chunks[0];
+	};
+};
+
+#define RCAR_DMAC_DESCS_PER_PAGE					\
+	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
+	sizeof(struct rcar_dmac_desc))
+#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
+	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
+	sizeof(struct rcar_dmac_xfer_chunk))
+
+/*
+ * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
+ * @chan: base DMA channel object
+ * @iomem: channel I/O memory base
+ * @index: index of this channel in the controller
+ * @src_xfer_size: size (in bytes) of hardware transfers on the source side
+ * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
+ * @src_slave_addr: slave source memory address
+ * @dst_slave_addr: slave destination memory address
+ * @mid_rid: hardware MID/RID for the DMA client using this channel
+ * @lock: protects the channel CHCR register and the desc members
+ * @desc.free: list of free descriptors
+ * @desc.pending: list of pending descriptors (submitted with tx_submit)
+ * @desc.active: list of active descriptors (activated with issue_pending)
+ * @desc.done: list of completed descriptors
+ * @desc.wait: list of descriptors waiting for an ack
+ * @desc.running: the descriptor being processed (a member of the active list)
+ * @desc.chunks_free: list of free transfer chunk descriptors
+ * @desc.pages: list of pages used by allocated descriptors
+ */
+struct rcar_dmac_chan {
+	struct dma_chan chan;
+	void __iomem *iomem;
+	unsigned int index;
+
+	unsigned int src_xfer_size;
+	unsigned int dst_xfer_size;
+	dma_addr_t src_slave_addr;
+	dma_addr_t dst_slave_addr;
+	int mid_rid;
+
+	spinlock_t lock;
+
+	struct {
+		struct list_head free;
+		struct list_head pending;
+		struct list_head active;
+		struct list_head done;
+		struct list_head wait;
+		struct rcar_dmac_desc *running;
+
+		struct list_head chunks_free;
+
+		struct list_head pages;
+	} desc;
+};
+
+#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)
+
+/*
+ * struct rcar_dmac - R-Car Gen2 DMA Controller
+ * @engine: base DMA engine object
+ * @dev: the hardware device
+ * @iomem: remapped I/O memory base
+ * @n_channels: number of available channels
+ * @channels: array of DMAC channels
+ * @modules: bitmask of client modules in use
+ */
+struct rcar_dmac {
+	struct dma_device engine;
+	struct device *dev;
+	void __iomem *iomem;
+
+	unsigned int n_channels;
+	struct rcar_dmac_chan *channels;
+
+	unsigned long modules[256 / BITS_PER_LONG];
+};
+
+#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)
+
+/* -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))
+
+#define RCAR_DMAISTA			0x0020
+#define RCAR_DMASEC			0x0030
+#define RCAR_DMAOR			0x0060
+#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
+#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
+#define RCAR_DMAOR_AE			(1 << 2)
+#define RCAR_DMAOR_DME			(1 << 0)
+#define RCAR_DMACHCLR			0x0080
+#define RCAR_DMADPSEC			0x00a0
+
+#define RCAR_DMASAR			0x0000
+#define RCAR_DMADAR			0x0004
+#define RCAR_DMATCR			0x0008
+#define RCAR_DMATCR_MASK		0x00ffffff
+#define RCAR_DMATSR			0x0028
+#define RCAR_DMACHCR			0x000c
+#define RCAR_DMACHCR_CAE		(1 << 31)
+#define RCAR_DMACHCR_CAIE		(1 << 30)
+#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
+#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
+#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
+#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
+#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
+#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
+#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
+#define RCAR_DMACHCR_DPB		(1 << 22)
+#define RCAR_DMACHCR_DSE		(1 << 19)
+#define RCAR_DMACHCR_DSIE		(1 << 18)
+#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
+#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
+#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
+#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
+#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
+#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
+#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
+#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
+#define RCAR_DMACHCR_DM_INC		(1 << 14)
+#define RCAR_DMACHCR_DM_DEC		(2 << 14)
+#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
+#define RCAR_DMACHCR_SM_INC		(1 << 12)
+#define RCAR_DMACHCR_SM_DEC		(2 << 12)
+#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
+#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
+#define RCAR_DMACHCR_IE			(1 << 2)
+#define RCAR_DMACHCR_TE			(1 << 1)
+#define RCAR_DMACHCR_DE			(1 << 0)
+#define RCAR_DMATCRB			0x0018
+#define RCAR_DMATSRB			0x0038
+#define RCAR_DMACHCRB			0x001c
+#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
+#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
+#define RCAR_DMACHCRB_DPTR_SHIFT	16
+#define RCAR_DMACHCRB_DRST		(1 << 15)
+#define RCAR_DMACHCRB_DTS		(1 << 8)
+#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
+#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
+#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
+#define RCAR_DMARS			0x0040
+#define RCAR_DMABUFCR			0x0048
+#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
+#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
+#define RCAR_DMADPBASE			0x0050
+#define RCAR_DMADPBASE_MASK		0xfffffff0
+#define RCAR_DMADPBASE_SEL		(1 << 0)
+#define RCAR_DMADPCR			0x0054
+#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
+#define RCAR_DMAFIXSAR			0x0010
+#define RCAR_DMAFIXDAR			0x0014
+#define RCAR_DMAFIXDPBASE		0x0060
+
+/* Hardcode the MEMCPY transfer size to 4 bytes. */
+#define RCAR_DMAC_MEMCPY_XFER_SIZE	4
+
+/* -----------------------------------------------------------------------------
+ * Device access
+ */
+
+static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
+{
+	if (reg == RCAR_DMAOR)
+		writew(data, dmac->iomem + reg);
+	else
+		writel(data, dmac->iomem + reg);
+}
+
+static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
+{
+	if (reg == RCAR_DMAOR)
+		return readw(dmac->iomem + reg);
+	else
+		return readl(dmac->iomem + reg);
+}
+
+static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
+{
+	if (reg == RCAR_DMARS)
+		return readw(chan->iomem + reg);
+	else
+		return readl(chan->iomem + reg);
+}
+
+static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
+{
+	if (reg == RCAR_DMARS)
+		writew(data, chan->iomem + reg);
+	else
+		writel(data, chan->iomem + reg);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization and configuration
+ */
+
+static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
+{
+	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+	return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
+}
+
+static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc = chan->desc.running;
+	u32 chcr = desc->chcr;
+
+	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
+
+	if (chan->mid_rid >= 0)
+		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
+
+	if (desc->hwdescs.use) {
+		struct rcar_dmac_xfer_chunk *chunk;
+
+		dev_dbg(chan->chan.device->dev,
+			"chan%u: queue desc %p: %u@%pad\n",
+			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
+				     desc->hwdescs.dma >> 32);
+#endif
+		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
+				     (desc->hwdescs.dma & 0xfffffff0) |
+				     RCAR_DMADPBASE_SEL);
+		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
+				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
+				     RCAR_DMACHCRB_DRST);
+
+		/*
+		 * Errata: When descriptor memory is accessed through an IOMMU
+		 * the DMADAR register isn't initialized automatically from the
+		 * first descriptor at beginning of transfer by the DMAC like it
+		 * should. Initialize it manually with the destination address
+		 * of the first chunk.
+		 */
+		chunk = list_first_entry(&desc->chunks,
+					 struct rcar_dmac_xfer_chunk, node);
+		rcar_dmac_chan_write(chan, RCAR_DMADAR,
+				     chunk->dst_addr & 0xffffffff);
+
+		/*
+		 * Program the descriptor stage interrupt to occur after the end
+		 * of the first stage.
+		 */
+		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
+
+		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
+		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
+
+		/*
+		 * If the descriptor isn't cyclic enable normal descriptor mode
+		 * and the transfer completion interrupt.
+		 */
+		if (!desc->cyclic)
+			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
+		/*
+		 * If the descriptor is cyclic and has a callback enable the
+		 * descriptor stage interrupt in infinite repeat mode.
+		 */
+		else if (desc->async_tx.callback)
+			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
+		/*
+		 * Otherwise just select infinite repeat mode without any
+		 * interrupt.
+		 */
+		else
+			chcr |= RCAR_DMACHCR_DPM_INFINITE;
+	} else {
+		struct rcar_dmac_xfer_chunk *chunk = desc->running;
+
+		dev_dbg(chan->chan.device->dev,
+			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
+			chan->index, chunk, chunk->size, &chunk->src_addr,
+			&chunk->dst_addr);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
+				     chunk->src_addr >> 32);
+		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
+				     chunk->dst_addr >> 32);
+#endif
+		rcar_dmac_chan_write(chan, RCAR_DMASAR,
+				     chunk->src_addr & 0xffffffff);
+		rcar_dmac_chan_write(chan, RCAR_DMADAR,
+				     chunk->dst_addr & 0xffffffff);
+		rcar_dmac_chan_write(chan, RCAR_DMATCR,
+				     chunk->size >> desc->xfer_shift);
+
+		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
+	}
+
+	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
+}
+
+static int rcar_dmac_init(struct rcar_dmac *dmac)
+{
+	u16 dmaor;
+
+	/* Clear all channels and enable the DMAC globally. */
+	rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
+	rcar_dmac_write(dmac, RCAR_DMAOR,
+			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
+
+	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
+	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
+		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors submission
+ */
+
+static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
+	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
+	unsigned long flags;
+	dma_cookie_t cookie;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
+		chan->index, tx->cookie, desc);
+
+	list_add_tail(&desc->node, &chan->desc.pending);
+	desc->running = list_first_entry(&desc->chunks,
+					 struct rcar_dmac_xfer_chunk, node);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return cookie;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors allocation and free
+ */
+
+/*
+ * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
+ * @chan: the DMA channel
+ * @gfp: allocation flags
+ */
+static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
+{
+	struct rcar_dmac_desc_page *page;
+	LIST_HEAD(list);
+	unsigned int i;
+
+	page = (void *)get_zeroed_page(gfp);
+	if (!page)
+		return -ENOMEM;
+
+	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
+		struct rcar_dmac_desc *desc = &page->descs[i];
+
+		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
+		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
+		INIT_LIST_HEAD(&desc->chunks);
+
+		list_add_tail(&desc->node, &list);
+	}
+
+	spin_lock_irq(&chan->lock);
+	list_splice_tail(&list, &chan->desc.free);
+	list_add_tail(&page->node, &chan->desc.pages);
+	spin_unlock_irq(&chan->lock);
+
+	return 0;
+}
+
+/*
+ * rcar_dmac_desc_put - Release a DMA transfer descriptor
+ * @chan: the DMA channel
+ * @desc: the descriptor
+ *
+ * Put the descriptor and its transfer chunk descriptors back in the channel's
+ * free descriptors lists. The descriptor's chunks list will be reinitialized to
+ * an empty list as a result.
+ *
+ * The descriptor must have been removed from the channel's lists before calling
+ * this function.
+ */
+static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
+			       struct rcar_dmac_desc *desc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
+	list_add_tail(&desc->node, &chan->desc.free);
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc, *_desc;
+	LIST_HEAD(list);
+
+	/*
+	 * We have to temporarily move all descriptors from the wait list to a
+	 * local list as iterating over the wait list, even with
+	 * list_for_each_entry_safe, isn't safe if we release the channel lock
+	 * around the rcar_dmac_desc_put() call.
+	 */
+	spin_lock_irq(&chan->lock);
+	list_splice_init(&chan->desc.wait, &list);
+	spin_unlock_irq(&chan->lock);
+
+	list_for_each_entry_safe(desc, _desc, &list, node) {
+		if (async_tx_test_ack(&desc->async_tx)) {
+			list_del(&desc->node);
+			rcar_dmac_desc_put(chan, desc);
+		}
+	}
+
+	if (list_empty(&list))
+		return;
+
+	/* Put the remaining descriptors back in the wait list. */
+	spin_lock_irq(&chan->lock);
+	list_splice(&list, &chan->desc.wait);
+	spin_unlock_irq(&chan->lock);
+}
+
+/*
+ * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
+ * @chan: the DMA channel
+ *
+ * Locking: This function must be called in a non-atomic context.
+ *
+ * Return: A pointer to the allocated descriptor or NULL if no descriptor can
+ * be allocated.
+ */
+static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc;
+	int ret;
+
+	/* Recycle acked descriptors before attempting allocation. */
+	rcar_dmac_desc_recycle_acked(chan);
+
+	spin_lock_irq(&chan->lock);
+
+	while (list_empty(&chan->desc.free)) {
+		/*
+		 * No free descriptors, allocate a page worth of them and try
+		 * again, as someone else could race us to get the newly
+		 * allocated descriptors. If the allocation fails return an
+		 * error.
+		 */
+		spin_unlock_irq(&chan->lock);
+		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
+		if (ret < 0)
+			return NULL;
+		spin_lock_irq(&chan->lock);
+	}
+
+	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
+	list_del(&desc->node);
+
+	spin_unlock_irq(&chan->lock);
+
+	return desc;
+}
+
+/*
+ * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
+ * @chan: the DMA channel
+ * @gfp: allocation flags
+ */
+static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
+{
+	struct rcar_dmac_desc_page *page;
+	LIST_HEAD(list);
+	unsigned int i;
+
+	page = (void *)get_zeroed_page(gfp);
+	if (!page)
+		return -ENOMEM;
+
+	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
+		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
+
+		list_add_tail(&chunk->node, &list);
+	}
+
+	spin_lock_irq(&chan->lock);
+	list_splice_tail(&list, &chan->desc.chunks_free);
+	list_add_tail(&page->node, &chan->desc.pages);
+	spin_unlock_irq(&chan->lock);
+
+	return 0;
+}
+
+/*
+ * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
+ * @chan: the DMA channel
+ *
+ * Locking: This function must be called in a non-atomic context.
+ *
+ * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
+ * descriptor can be allocated.
+ */
+static struct rcar_dmac_xfer_chunk *
+rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_xfer_chunk *chunk;
+	int ret;
+
+	spin_lock_irq(&chan->lock);
+
+	while (list_empty(&chan->desc.chunks_free)) {
+		/*
+		 * No free descriptors, allocate a page worth of them and try
+		 * again, as someone else could race us to get the newly
+		 * allocated descriptors. If the allocation fails return an
+		 * error.
+		 */
+		spin_unlock_irq(&chan->lock);
+		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
+		if (ret < 0)
+			return NULL;
+		spin_lock_irq(&chan->lock);
+	}
+
+	chunk = list_first_entry(&chan->desc.chunks_free,
+				 struct rcar_dmac_xfer_chunk, node);
+	list_del(&chunk->node);
+
+	spin_unlock_irq(&chan->lock);
+
+	return chunk;
+}
+
+static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
+				     struct rcar_dmac_desc *desc, size_t size)
+{
+	/*
+	 * dma_alloc_coherent() allocates memory in page size increments. To
+	 * avoid reallocating the hardware descriptors when the allocated size
+	 * wouldn't change align the requested size to a multiple of the page
+	 * size.
+	 */
+	size = PAGE_ALIGN(size);
+
+	if (desc->hwdescs.size == size)
+		return;
+
+	if (desc->hwdescs.mem) {
+		dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
+				  desc->hwdescs.mem, desc->hwdescs.dma);
+		desc->hwdescs.mem = NULL;
+		desc->hwdescs.size = 0;
+	}
+
+	if (!size)
+		return;
+
+	desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
+					       &desc->hwdescs.dma, GFP_NOWAIT);
+	if (!desc->hwdescs.mem)
+		return;
+
+	desc->hwdescs.size = size;
+}
+
+static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
+				 struct rcar_dmac_desc *desc)
+{
+	struct rcar_dmac_xfer_chunk *chunk;
+	struct rcar_dmac_hw_desc *hwdesc;
+
+	rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
+
+	hwdesc = desc->hwdescs.mem;
+	if (!hwdesc)
+		return -ENOMEM;
+
+	list_for_each_entry(chunk, &desc->chunks, node) {
+		hwdesc->sar = chunk->src_addr;
+		hwdesc->dar = chunk->dst_addr;
+		hwdesc->tcr = chunk->size >> desc->xfer_shift;
+		hwdesc++;
+	}
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Stop and reset
+ */
+
+static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
+{
+	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+	chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
+		  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
+	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+}
+
+static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc, *_desc;
+	unsigned long flags;
+	LIST_HEAD(descs);
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	/* Move all non-free descriptors to the local lists. */
+	list_splice_init(&chan->desc.pending, &descs);
+	list_splice_init(&chan->desc.active, &descs);
+	list_splice_init(&chan->desc.done, &descs);
+	list_splice_init(&chan->desc.wait, &descs);
+
+	chan->desc.running = NULL;
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	list_for_each_entry_safe(desc, _desc, &descs, node) {
+		list_del(&desc->node);
+		rcar_dmac_desc_put(chan, desc);
+	}
+}
+
+static void rcar_dmac_stop(struct rcar_dmac *dmac)
+{
+	rcar_dmac_write(dmac, RCAR_DMAOR, 0);
+}
+
+static void rcar_dmac_abort(struct rcar_dmac *dmac)
+{
+	unsigned int i;
+
+	/* Stop all channels. */
+	for (i = 0; i < dmac->n_channels; ++i) {
+		struct rcar_dmac_chan *chan = &dmac->channels[i];
+
+		/* Stop and reinitialize the channel. */
+		spin_lock(&chan->lock);
+		rcar_dmac_chan_halt(chan);
+		spin_unlock(&chan->lock);
+
+		rcar_dmac_chan_reinit(chan);
+	}
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors preparation
+ */
+
+static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
+					  struct rcar_dmac_desc *desc)
+{
+	static const u32 chcr_ts[] = {
+		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
+		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
+		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
+		RCAR_DMACHCR_TS_64B,
+	};
+
+	unsigned int xfer_size;
+	u32 chcr;
+
+	switch (desc->direction) {
+	case DMA_DEV_TO_MEM:
+		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
+		     | RCAR_DMACHCR_RS_DMARS;
+		xfer_size = chan->src_xfer_size;
+		break;
+
+	case DMA_MEM_TO_DEV:
+		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
+		     | RCAR_DMACHCR_RS_DMARS;
+		xfer_size = chan->dst_xfer_size;
+		break;
+
+	case DMA_MEM_TO_MEM:
+	default:
+		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
+		     | RCAR_DMACHCR_RS_AUTO;
+		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
+		break;
+	}
+
+	desc->xfer_shift = ilog2(xfer_size);
+	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
+}
+
+/*
+ * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and a correct
+ * list manipulation. For slave DMA direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains slave address,
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
+ * and the SG list contains only one element and points at the source buffer.
+ */
+static struct dma_async_tx_descriptor *
+rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
+		       unsigned int sg_len, dma_addr_t dev_addr,
+		       enum dma_transfer_direction dir, unsigned long dma_flags,
+		       bool cyclic)
+{
+	struct rcar_dmac_xfer_chunk *chunk;
+	struct rcar_dmac_desc *desc;
+	struct scatterlist *sg;
+	unsigned int nchunks = 0;
+	unsigned int max_chunk_size;
+	unsigned int full_size = 0;
+	bool highmem = false;
+	unsigned int i;
+
+	desc = rcar_dmac_desc_get(chan);
+	if (!desc)
+		return NULL;
+
+	desc->async_tx.flags = dma_flags;
+	desc->async_tx.cookie = -EBUSY;
+
+	desc->cyclic = cyclic;
+	desc->direction = dir;
+
+	rcar_dmac_chan_configure_desc(chan, desc);
+
+	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
+
+	/*
+	 * Allocate and fill the transfer chunk descriptors. We own the only
+	 * reference to the DMA descriptor, there's no need for locking.
+	 */
+	for_each_sg(sgl, sg, sg_len, i) {
+		dma_addr_t mem_addr = sg_dma_address(sg);
+		unsigned int len = sg_dma_len(sg);
+
+		full_size += len;
+
+		while (len) {
+			unsigned int size = min(len, max_chunk_size);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+			/*
+			 * Prevent individual transfers from crossing 4GB
+			 * boundaries.
+			 */
+			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
+				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
+			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
+				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
+
+			/*
+			 * Check if either of the source or destination address
+			 * can't be expressed in 32 bits. If so we can't use
+			 * hardware descriptor lists.
+			 */
+			if (dev_addr >> 32 || mem_addr >> 32)
+				highmem = true;
+#endif
+
+			chunk = rcar_dmac_xfer_chunk_get(chan);
+			if (!chunk) {
+				rcar_dmac_desc_put(chan, desc);
+				return NULL;
+			}
+
+			if (dir == DMA_DEV_TO_MEM) {
+				chunk->src_addr = dev_addr;
+				chunk->dst_addr = mem_addr;
+			} else {
+				chunk->src_addr = mem_addr;
+				chunk->dst_addr = dev_addr;
+			}
+
+			chunk->size = size;
+
+			dev_dbg(chan->chan.device->dev,
+				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
+				chan->index, chunk, desc, i, sg, size, len,
+				&chunk->src_addr, &chunk->dst_addr);
+
+			mem_addr += size;
+			if (dir == DMA_MEM_TO_MEM)
+				dev_addr += size;
+
+			len -= size;
+
+			list_add_tail(&chunk->node, &desc->chunks);
+			nchunks++;
+		}
+	}
+
+	desc->nchunks = nchunks;
+	desc->size = full_size;
+
+	/*
+	 * Use hardware descriptor lists if possible when more than one chunk
+	 * needs to be transferred (otherwise they don't make much sense).
+	 *
+	 * The highmem check currently covers the whole transfer. As an
+	 * optimization we could use descriptor lists for consecutive lowmem
+	 * chunks and direct manual mode for highmem chunks. Whether the
+	 * performance improvement would be significant enough compared to the
+	 * additional complexity remains to be investigated.
+	 */
+	desc->hwdescs.use = !highmem && nchunks > 1;
+	if (desc->hwdescs.use) {
+		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
+			desc->hwdescs.use = false;
+	}
+
+	return &desc->async_tx;
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	int ret;
+
+	INIT_LIST_HEAD(&rchan->desc.chunks_free);
+	INIT_LIST_HEAD(&rchan->desc.pages);
+
+	/* Preallocate descriptors. */
+	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
+	if (ret < 0)
+		return -ENOMEM;
+
+	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
+	if (ret < 0)
+		return -ENOMEM;
+
+	return pm_runtime_get_sync(chan->device->dev);
+}
+
+static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+	struct rcar_dmac_desc_page *page, *_page;
+	struct rcar_dmac_desc *desc;
+	LIST_HEAD(list);
+
+	/* Protect against ISR */
+	spin_lock_irq(&rchan->lock);
+	rcar_dmac_chan_halt(rchan);
+	spin_unlock_irq(&rchan->lock);
+
+	/* Now no new interrupts will occur */
+
+	if (rchan->mid_rid >= 0) {
+		/* The caller is holding dma_list_mutex */
+		clear_bit(rchan->mid_rid, dmac->modules);
+		rchan->mid_rid = -EINVAL;
+	}
+
+	list_splice_init(&rchan->desc.free, &list);
+	list_splice_init(&rchan->desc.pending, &list);
+	list_splice_init(&rchan->desc.active, &list);
+	list_splice_init(&rchan->desc.done, &list);
+	list_splice_init(&rchan->desc.wait, &list);
+
+	list_for_each_entry(desc, &list, node)
+		rcar_dmac_realloc_hwdesc(rchan, desc, 0);
+
+	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
+		list_del(&page->node);
+		free_page((unsigned long)page);
+	}
+
+	pm_runtime_put(chan->device->dev);
+}
+
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+			  dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	struct scatterlist sgl;
+
+	if (!len)
+		return NULL;
+
+	sg_init_table(&sgl, 1);
+	sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
+		    offset_in_page(dma_src));
+	sg_dma_address(&sgl) = dma_src;
+	sg_dma_len(&sgl) = len;
+
+	return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
+				      DMA_MEM_TO_MEM, flags, false);
+}
+
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			unsigned int sg_len, enum dma_transfer_direction dir,
+			unsigned long flags, void *context)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	dma_addr_t dev_addr;
+
+	/* Someone calling slave DMA on a generic channel? */
+	if (rchan->mid_rid < 0 || !sg_len) {
+		dev_warn(chan->device->dev,
+			 "%s: bad parameter: len=%d, id=%d\n",
+			 __func__, sg_len, rchan->mid_rid);
+		return NULL;
+	}
+
+	dev_addr = dir == DMA_DEV_TO_MEM
+		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
+	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+				      dir, flags, false);
+}
+
+#define RCAR_DMAC_MAX_SG_LEN	32
+
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+			  size_t buf_len, size_t period_len,
+			  enum dma_transfer_direction dir, unsigned long flags)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	struct dma_async_tx_descriptor *desc;
+	struct scatterlist *sgl;
+	dma_addr_t dev_addr;
+	unsigned int sg_len;
+	unsigned int i;
+
+	/* Someone calling slave DMA on a generic channel? */
+	if (rchan->mid_rid < 0 || buf_len < period_len) {
+		dev_warn(chan->device->dev,
+			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
+			__func__, buf_len, period_len, rchan->mid_rid);
+		return NULL;
+	}
+
+	sg_len = buf_len / period_len;
+	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
+		dev_err(chan->device->dev,
+			"chan%u: sg length %d exceeds limit %d",
+			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
+		return NULL;
+	}
+
+	/*
+	 * Allocate the sg list dynamically as it would consume too much stack
+	 * space.
+	 */
+	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
+	if (!sgl)
+		return NULL;
+
+	sg_init_table(sgl, sg_len);
+
+	for (i = 0; i < sg_len; ++i) {
+		dma_addr_t src = buf_addr + (period_len * i);
+
+		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
+			    offset_in_page(src));
+		sg_dma_address(&sgl[i]) = src;
+		sg_dma_len(&sgl[i]) = period_len;
+	}
+
+	dev_addr = dir == DMA_DEV_TO_MEM
+		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
+	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+				      dir, flags, true);
+
+	kfree(sgl);
+	return desc;
+}
+
+static int rcar_dmac_device_config(struct dma_chan *chan,
+				   struct dma_slave_config *cfg)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+	/*
+	 * We could lock this, but you shouldn't be configuring the
+	 * channel, while using it...
+	 */
+	rchan->src_slave_addr = cfg->src_addr;
+	rchan->dst_slave_addr = cfg->dst_addr;
+	rchan->src_xfer_size = cfg->src_addr_width;
+	rchan->dst_xfer_size = cfg->dst_addr_width;
+
+	return 0;
+}
+
+static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rchan->lock, flags);
+	rcar_dmac_chan_halt(rchan);
+	spin_unlock_irqrestore(&rchan->lock, flags);
+
+	/*
+	 * FIXME: No new interrupt can occur now, but the IRQ thread might still
+	 * be running.
+	 */
+
+	rcar_dmac_chan_reinit(rchan);
+
+	return 0;
+}
+
+static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
+					       dma_cookie_t cookie)
+{
+	struct rcar_dmac_desc *desc = chan->desc.running;
+	struct rcar_dmac_xfer_chunk *running = NULL;
+	struct rcar_dmac_xfer_chunk *chunk;
+	unsigned int residue = 0;
+	unsigned int dptr = 0;
+
+	if (!desc)
+		return 0;
+
+	/*
+	 * If the cookie doesn't correspond to the currently running transfer
+	 * then the descriptor hasn't been processed yet, and the residue is
+	 * equal to the full descriptor size.
+	 */
+	if (cookie != desc->async_tx.cookie)
+		return desc->size;
+
+	/*
+	 * In descriptor mode the descriptor running pointer is not maintained
+	 * by the interrupt handler, find the running descriptor from the
+	 * descriptor pointer field in the CHCRB register. In non-descriptor
+	 * mode just use the running descriptor pointer.
+	 */
+	if (desc->hwdescs.use) {
+		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+		WARN_ON(dptr >= desc->nchunks);
+	} else {
+		running = desc->running;
+	}
+
+	/* Compute the size of all chunks still to be transferred. */
+	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
+		if (chunk == running || ++dptr == desc->nchunks)
+			break;
+
+		residue += chunk->size;
+	}
+
+	/* Add the residue for the current chunk. */
+	residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
+
+	return residue;
+}
+
+static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
+					   dma_cookie_t cookie,
+					   struct dma_tx_state *txstate)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	enum dma_status status;
+	unsigned long flags;
+	unsigned int residue;
+
+	status = dma_cookie_status(chan, cookie, txstate);
+	if (status == DMA_COMPLETE || !txstate)
+		return status;
+
+	spin_lock_irqsave(&rchan->lock, flags);
+	residue = rcar_dmac_chan_get_residue(rchan, cookie);
+	spin_unlock_irqrestore(&rchan->lock, flags);
+
+	dma_set_residue(txstate, residue);
+
+	return status;
+}
+
+static void rcar_dmac_issue_pending(struct dma_chan *chan)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rchan->lock, flags);
+
+	if (list_empty(&rchan->desc.pending))
+		goto done;
+
+	/* Append the pending list to the active list. */
+	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
+
+	/*
+	 * If no transfer is running pick the first descriptor from the active
+	 * list and start the transfer.
+	 */
+	if (!rchan->desc.running) {
+		struct rcar_dmac_desc *desc;
+
+		desc = list_first_entry(&rchan->desc.active,
+					struct rcar_dmac_desc, node);
+		rchan->desc.running = desc;
+
+		rcar_dmac_chan_start_xfer(rchan);
+	}
+
+done:
+	spin_unlock_irqrestore(&rchan->lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc = chan->desc.running;
+	unsigned int stage;
+
+	if (WARN_ON(!desc || !desc->cyclic)) {
+		/*
+		 * This should never happen, there should always be a running
+		 * cyclic descriptor when a descriptor stage end interrupt is
+		 * triggered. Warn and return.
+		 */
+		return IRQ_NONE;
+	}
+
+	/* Program the interrupt pointer to the next stage. */
+	stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+		 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+	rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc = chan->desc.running;
+	irqreturn_t ret = IRQ_WAKE_THREAD;
+
+	if (WARN_ON_ONCE(!desc)) {
+		/*
+		 * This should never happen, there should always be a running
+		 * descriptor when a transfer end interrupt is triggered. Warn
+		 * and return.
+		 */
+		return IRQ_NONE;
+	}
+
+	/*
+	 * The transfer end interrupt isn't generated for each chunk when using
+	 * descriptor mode. Only update the running chunk pointer in
+	 * non-descriptor mode.
+	 */
+	if (!desc->hwdescs.use) {
+		/*
+		 * If we haven't completed the last transfer chunk simply move
+		 * to the next one. Only wake the IRQ thread if the transfer is
+		 * cyclic.
+		 */
+		if (!list_is_last(&desc->running->node, &desc->chunks)) {
+			desc->running = list_next_entry(desc->running, node);
+			if (!desc->cyclic)
+				ret = IRQ_HANDLED;
+			goto done;
+		}
+
+		/*
+		 * We've completed the last transfer chunk. If the transfer is
+		 * cyclic, move back to the first one.
+		 */
+		if (desc->cyclic) {
+			desc->running =
+				list_first_entry(&desc->chunks,
+						 struct rcar_dmac_xfer_chunk,
+						 node);
+			goto done;
+		}
+	}
+
+	/* The descriptor is complete, move it to the done list. */
+	list_move_tail(&desc->node, &chan->desc.done);
+
+	/* Queue the next descriptor, if any. */
+	if (!list_empty(&chan->desc.active))
+		chan->desc.running = list_first_entry(&chan->desc.active,
+						      struct rcar_dmac_desc,
+						      node);
+	else
+		chan->desc.running = NULL;
+
+done:
+	if (chan->desc.running)
+		rcar_dmac_chan_start_xfer(chan);
+
+	return ret;
+}
+
+static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
+{
+	u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
+	struct rcar_dmac_chan *chan = dev;
+	irqreturn_t ret = IRQ_NONE;
+	u32 chcr;
+
+	spin_lock(&chan->lock);
+
+	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+	if (chcr & RCAR_DMACHCR_TE)
+		mask |= RCAR_DMACHCR_DE;
+	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
+
+	if (chcr & RCAR_DMACHCR_DSE)
+		ret |= rcar_dmac_isr_desc_stage_end(chan);
+
+	if (chcr & RCAR_DMACHCR_TE)
+		ret |= rcar_dmac_isr_transfer_end(chan);
+
+	spin_unlock(&chan->lock);
+
+	return ret;
+}
+
+static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
+{
+	struct rcar_dmac_chan *chan = dev;
+	struct rcar_dmac_desc *desc;
+
+	spin_lock_irq(&chan->lock);
+
+	/* For cyclic transfers notify the user after every chunk. */
+	if (chan->desc.running && chan->desc.running->cyclic) {
+		dma_async_tx_callback callback;
+		void *callback_param;
+
+		desc = chan->desc.running;
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
+
+		if (callback) {
+			spin_unlock_irq(&chan->lock);
+			callback(callback_param);
+			spin_lock_irq(&chan->lock);
+		}
+	}
+
+	/*
+	 * Call the callback function for all descriptors on the done list and
+	 * move them to the ack wait list.
+	 */
+	while (!list_empty(&chan->desc.done)) {
+		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
+					node);
+		dma_cookie_complete(&desc->async_tx);
+		list_del(&desc->node);
+
+		if (desc->async_tx.callback) {
+			spin_unlock_irq(&chan->lock);
+			/*
+			 * We own the only reference to this descriptor, we can
+			 * safely dereference it without holding the channel
+			 * lock.
+			 */
+			desc->async_tx.callback(desc->async_tx.callback_param);
+			spin_lock_irq(&chan->lock);
+		}
+
+		list_add_tail(&desc->node, &chan->desc.wait);
+	}
+
+	spin_unlock_irq(&chan->lock);
+
+	/* Recycle all acked descriptors. */
+	rcar_dmac_desc_recycle_acked(chan);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
+{
+	struct rcar_dmac *dmac = data;
+
+	if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
+		return IRQ_NONE;
+
+	/*
+	 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
+	 * abort transfers on all channels, and reinitialize the DMAC.
+	 */
+	rcar_dmac_stop(dmac);
+	rcar_dmac_abort(dmac);
+	rcar_dmac_init(dmac);
+
+	return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
+static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+	struct of_phandle_args *dma_spec = arg;
+
+	/*
+	 * FIXME: Using a filter on OF platforms is a nonsense. The OF xlate
+	 * function knows from which device it wants to allocate a channel from,
+	 * and would be perfectly capable of selecting the channel it wants.
+	 * Forcing it to call dma_request_channel() and iterate through all
+	 * channels from all controllers is just pointless.
+	 */
+	if (chan->device->device_config != rcar_dmac_device_config ||
+	    dma_spec->np != chan->device->dev->of_node)
+		return false;
+
+	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
+}
+
+static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
+					   struct of_dma *ofdma)
+{
+	struct rcar_dmac_chan *rchan;
+	struct dma_chan *chan;
+	dma_cap_mask_t mask;
+
+	if (dma_spec->args_count != 1)
+		return NULL;
+
+	/* Only slave DMA channels can be allocated via DT */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
+	if (!chan)
+		return NULL;
+
+	rchan = to_rcar_dmac_chan(chan);
+	rchan->mid_rid = dma_spec->args[0];
+
+	return chan;
+}
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int rcar_dmac_sleep_suspend(struct device *dev)
+{
+	/*
+	 * TODO: Wait for the current transfer to complete and stop the device.
+	 */
+	return 0;
+}
+
+static int rcar_dmac_sleep_resume(struct device *dev)
+{
+	/* TODO: Resume transfers, if any. */
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int rcar_dmac_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int rcar_dmac_runtime_resume(struct device *dev)
+{
+	struct rcar_dmac *dmac = dev_get_drvdata(dev);
+
+	return rcar_dmac_init(dmac);
+}
+#endif
+
+static const struct dev_pm_ops rcar_dmac_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
+	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
+			   NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
+				struct rcar_dmac_chan *rchan,
+				unsigned int index)
+{
+	struct platform_device *pdev = to_platform_device(dmac->dev);
+	struct dma_chan *chan = &rchan->chan;
+	char pdev_irqname[5];
+	char *irqname;
+	int irq;
+	int ret;
+
+	rchan->index = index;
+	rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
+	rchan->mid_rid = -EINVAL;
+
+	spin_lock_init(&rchan->lock);
+
+	INIT_LIST_HEAD(&rchan->desc.free);
+	INIT_LIST_HEAD(&rchan->desc.pending);
+	INIT_LIST_HEAD(&rchan->desc.active);
+	INIT_LIST_HEAD(&rchan->desc.done);
+	INIT_LIST_HEAD(&rchan->desc.wait);
+
+	/* Request the channel interrupt. */
+	sprintf(pdev_irqname, "ch%u", index);
+	irq = platform_get_irq_byname(pdev, pdev_irqname);
+	if (irq < 0) {
+		dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
+		return -ENODEV;
+	}
+
+	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+				 dev_name(dmac->dev), index);
+	if (!irqname)
+		return -ENOMEM;
+
+	ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
+					rcar_dmac_isr_channel_thread, 0,
+					irqname, rchan);
+	if (ret) {
+		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
+		return ret;
+	}
+
+	/*
+	 * Initialize the DMA engine channel and add it to the DMA engine
+	 * channels list.
+	 */
+	chan->device = &dmac->engine;
+	dma_cookie_init(chan);
+
+	list_add_tail(&chan->device_node, &dmac->engine.channels);
+
+	return 0;
+}
+
+static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
+{
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+	if (ret < 0) {
+		dev_err(dev, "unable to read dma-channels property\n");
+		return ret;
+	}
+
+	if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+		dev_err(dev, "invalid number of channels %u\n",
+			dmac->n_channels);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rcar_dmac_probe(struct platform_device *pdev)
+{
+	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
+		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
+		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
+		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
+	unsigned int channels_offset = 0;
+	struct dma_device *engine;
+	struct rcar_dmac *dmac;
+	struct resource *mem;
+	unsigned int i;
+	char *irqname;
+	int irq;
+	int ret;
+
+	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+	if (!dmac)
+		return -ENOMEM;
+
+	dmac->dev = &pdev->dev;
+	platform_set_drvdata(pdev, dmac);
+
+	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be
+	 * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
+	 * is connected to microTLB 0 on currently supported platforms, so we
+	 * can't use it with the IPMMU. As the IOMMU API operates at the device
+	 * level we can't disable it selectively, so ignore channel 0 for now if
+	 * the device is part of an IOMMU group.
+	 */
+	if (pdev->dev.iommu_group) {
+		dmac->n_channels--;
+		channels_offset = 1;
+	}
+
+	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+				      sizeof(*dmac->channels), GFP_KERNEL);
+	if (!dmac->channels)
+		return -ENOMEM;
+
+	/* Request resources. */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(dmac->iomem))
+		return PTR_ERR(dmac->iomem);
+
+	irq = platform_get_irq_byname(pdev, "error");
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no error IRQ specified\n");
+		return -ENODEV;
+	}
+
+	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
+				 dev_name(dmac->dev));
+	if (!irqname)
+		return -ENOMEM;
+
+	ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
+			       irqname, dmac);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
+			irq, ret);
+		return ret;
+	}
+
+	/* Enable runtime PM and initialize the device. */
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
+		return ret;
+	}
+
+	ret = rcar_dmac_init(dmac);
+	pm_runtime_put(&pdev->dev);
+
+	if (ret) {
+		dev_err(&pdev->dev, "failed to reset device\n");
+		goto error;
+	}
+
+	/* Initialize the channels. */
+	INIT_LIST_HEAD(&dmac->engine.channels);
+
+	for (i = 0; i < dmac->n_channels; ++i) {
+		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
+					   i + channels_offset);
+		if (ret < 0)
+			goto error;
+	}
+
+	/* Register the DMAC as a DMA provider for DT. */
+	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
+					 NULL);
+	if (ret < 0)
+		goto error;
+
+	/*
+	 * Register the DMA engine device.
+	 *
+	 * Default transfer size of 32 bytes requires 32-byte alignment.
+	 */
+	engine = &dmac->engine;
+	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+	dma_cap_set(DMA_SLAVE, engine->cap_mask);
+
+	engine->dev = &pdev->dev;
+	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
+
+	engine->src_addr_widths = widths;
+	engine->dst_addr_widths = widths;
+	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
+	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
+	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
+	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
+	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
+	engine->device_config = rcar_dmac_device_config;
+	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
+	engine->device_tx_status = rcar_dmac_tx_status;
+	engine->device_issue_pending = rcar_dmac_issue_pending;
+
+	ret = dma_async_device_register(engine);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	of_dma_controller_free(pdev->dev.of_node);
+	pm_runtime_disable(&pdev->dev);
+	return ret;
+}
+
+static int rcar_dmac_remove(struct platform_device *pdev)
+{
+	struct rcar_dmac *dmac = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&dmac->engine);
+
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static void rcar_dmac_shutdown(struct platform_device *pdev)
+{
+	struct rcar_dmac *dmac = platform_get_drvdata(pdev);
+
+	rcar_dmac_stop(dmac);
+}
+
+static const struct of_device_id rcar_dmac_of_ids[] = {
+	{ .compatible = "renesas,rcar-dmac", },
+	{ /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
+
+static struct platform_driver rcar_dmac_driver = {
+	.driver		= {
+		.pm	= &rcar_dmac_pm,
+		.name	= "rcar-dmac",
+		.of_match_table = rcar_dmac_of_ids,
+	},
+	.probe		= rcar_dmac_probe,
+	.remove		= rcar_dmac_remove,
+	.shutdown	= rcar_dmac_shutdown,
+};
+
+module_platform_driver(rcar_dmac_driver);
+
+MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_LICENSE("GPL v2");
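[Editor's note] For orientation only: once rcar_dmac_probe() has registered the engine above, consumers drive these channels through the generic dmaengine slave API rather than any driver-specific calls. The sketch below is not part of this series; the demo_* names, the FIFO address and the completion plumbing are hypothetical, while the dmaengine helpers themselves are the standard ones.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Channel lookup is normally done once at consumer probe time, e.g.
 * chan = dma_request_slave_channel(dev, "tx");
 */
static void demo_dma_done(void *param)
{
	complete(param);		/* signal whoever queued the transfer */
}

static int demo_issue_tx(struct dma_chan *chan, dma_addr_t buf, size_t len,
			 dma_addr_t fifo_addr, struct completion *done)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* Routed by the core to the provider's device_config callback. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = demo_dma_done;
	desc->callback_param = done;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}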

+ 6 - 0
drivers/dma/sh/rcar-hpbdma.c

@@ -534,6 +534,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)

 static int hpb_dmae_probe(struct platform_device *pdev)
 {
+	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
+		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
 	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
 	struct hpb_dmae_device *hpbdev;
 	struct dma_device *dma_dev;
@@ -595,6 +597,10 @@ static int hpb_dmae_probe(struct platform_device *pdev)

 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+	dma_dev->src_addr_widths = widths;
+	dma_dev->dst_addr_widths = widths;
+	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

 	hpbdev->shdma_dev.ops = &hpb_dmae_ops;
 	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);

+ 33 - 39
drivers/dma/sh/shdma-base.c

@@ -729,57 +729,50 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
 	return desc;
 }

-static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			  unsigned long arg)
+static int shdma_terminate_all(struct dma_chan *chan)
 {
 	struct shdma_chan *schan = to_shdma_chan(chan);
 	struct shdma_dev *sdev = to_shdma_dev(chan->device);
 	const struct shdma_ops *ops = sdev->ops;
-	struct dma_slave_config *config;
 	unsigned long flags;
-	int ret;

-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		spin_lock_irqsave(&schan->chan_lock, flags);
-		ops->halt_channel(schan);
+	spin_lock_irqsave(&schan->chan_lock, flags);
+	ops->halt_channel(schan);

-		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
-			/* Record partial transfer */
-			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
-						struct shdma_desc, node);
-			desc->partial = ops->get_partial(schan, desc);
-		}
+	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+		/* Record partial transfer */
+		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+							   struct shdma_desc, node);
+		desc->partial = ops->get_partial(schan, desc);
+	}

-		spin_unlock_irqrestore(&schan->chan_lock, flags);
+	spin_unlock_irqrestore(&schan->chan_lock, flags);

-		shdma_chan_ld_cleanup(schan, true);
-		break;
-	case DMA_SLAVE_CONFIG:
-		/*
-		 * So far only .slave_id is used, but the slave drivers are
-		 * encouraged to also set a transfer direction and an address.
-		 */
-		if (!arg)
-			return -EINVAL;
-		/*
-		 * We could lock this, but you shouldn't be configuring the
-		 * channel, while using it...
-		 */
-		config = (struct dma_slave_config *)arg;
-		ret = shdma_setup_slave(schan, config->slave_id,
-					config->direction == DMA_DEV_TO_MEM ?
-					config->src_addr : config->dst_addr);
-		if (ret < 0)
-			return ret;
-		break;
-	default:
-		return -ENXIO;
-	}
+	shdma_chan_ld_cleanup(schan, true);

 	return 0;
 }

+static int shdma_config(struct dma_chan *chan,
+			struct dma_slave_config *config)
+{
+	struct shdma_chan *schan = to_shdma_chan(chan);
+
+	/*
+	 * So far only .slave_id is used, but the slave drivers are
+	 * encouraged to also set a transfer direction and an address.
+	 */
+	if (!config)
+		return -EINVAL;
+	/*
+	 * We could lock this, but you shouldn't be configuring the
+	 * channel, while using it...
+	 */
+	return shdma_setup_slave(schan, config->slave_id,
+				 config->direction == DMA_DEV_TO_MEM ?
+				 config->src_addr : config->dst_addr);
+}
+
 static void shdma_issue_pending(struct dma_chan *chan)
 {
 	struct shdma_chan *schan = to_shdma_chan(chan);
@@ -1002,7 +995,8 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
 	/* Compulsory for DMA_SLAVE fields */
 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
 	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
-	dma_dev->device_control = shdma_control;
+	dma_dev->device_config = shdma_config;
+	dma_dev->device_terminate_all = shdma_terminate_all;

 	dma_dev->dev = dev;


+ 15 - 8
drivers/dma/sh/shdmac.c

@@ -588,6 +588,7 @@ static void sh_dmae_shutdown(struct platform_device *pdev)
 	sh_dmae_ctl_stop(shdev);
 }

+#ifdef CONFIG_PM
 static int sh_dmae_runtime_suspend(struct device *dev)
 {
 	return 0;
@@ -599,8 +600,9 @@ static int sh_dmae_runtime_resume(struct device *dev)

 	return sh_dmae_rst(shdev);
 }
+#endif

-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int sh_dmae_suspend(struct device *dev)
 {
 	return 0;
@@ -632,16 +634,12 @@ static int sh_dmae_resume(struct device *dev)

 	return 0;
 }
-#else
-#define sh_dmae_suspend NULL
-#define sh_dmae_resume NULL
 #endif

 static const struct dev_pm_ops sh_dmae_pm = {
-	.suspend		= sh_dmae_suspend,
-	.resume			= sh_dmae_resume,
-	.runtime_suspend	= sh_dmae_runtime_suspend,
-	.runtime_resume		= sh_dmae_runtime_resume,
+	SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
+	SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
+			   NULL)
 };

 static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
@@ -684,6 +682,10 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

 static int sh_dmae_probe(struct platform_device *pdev)
 {
+	const enum dma_slave_buswidth widths =
+		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
+		DMA_SLAVE_BUSWIDTH_4_BYTES  | DMA_SLAVE_BUSWIDTH_8_BYTES |
+		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
 	const struct sh_dmae_pdata *pdata;
 	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
 	int chan_irq[SH_DMAE_MAX_CHANNELS];
@@ -746,6 +748,11 @@ static int sh_dmae_probe(struct platform_device *pdev)
 			return PTR_ERR(shdev->dmars);
 	}

+	dma_dev->src_addr_widths = widths;
+	dma_dev->dst_addr_widths = widths;
+	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
 	if (!pdata->slave_only)
 		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 	if (pdata->slave && pdata->slave_num)

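[Editor's note] A sketch of the pattern the shdmac hunks above converge on; the demo_* names are invented. The point of splitting the guards is that SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() already expand to nothing when CONFIG_PM_SLEEP / CONFIG_PM are disabled, so the remaining #ifdef blocks only need to keep the otherwise-unused callbacks from triggering "defined but not used" warnings.

#include <linux/pm.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_PM
static int demo_runtime_suspend(struct device *dev)
{
	return 0;			/* nothing to save in this sketch */
}

static int demo_runtime_resume(struct device *dev)
{
	return 0;			/* reinitialise the hardware here */
}
#endif

static const struct dev_pm_ops demo_pm_ops = {
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};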
+ 16 - 43
drivers/dma/sirf-dma.c

@@ -281,9 +281,10 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
 	return cookie;
 }

-static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
-	struct dma_slave_config *config)
+static int sirfsoc_dma_slave_config(struct dma_chan *chan,
+				    struct dma_slave_config *config)
 {
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 	unsigned long flags;

 	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
@@ -297,8 +298,9 @@ static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
 	return 0;
 }

-static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
 {
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
 	int cid = schan->chan.chan_id;
 	unsigned long flags;
@@ -327,8 +329,9 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
 	return 0;
 }

-static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
+static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
 {
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
 	int cid = schan->chan.chan_id;
 	unsigned long flags;
@@ -348,8 +351,9 @@ static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
 	return 0;
 }

-static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
+static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
 {
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
 	int cid = schan->chan.chan_id;
 	unsigned long flags;
@@ -369,30 +373,6 @@ static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
 	return 0;
 }

-static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct dma_slave_config *config;
-	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
-
-	switch (cmd) {
-	case DMA_PAUSE:
-		return sirfsoc_dma_pause_chan(schan);
-	case DMA_RESUME:
-		return sirfsoc_dma_resume_chan(schan);
-	case DMA_TERMINATE_ALL:
-		return sirfsoc_dma_terminate_all(schan);
-	case DMA_SLAVE_CONFIG:
-		config = (struct dma_slave_config *)arg;
-		return sirfsoc_dma_slave_config(schan, config);
-
-	default:
-		break;
-	}
-
-	return -ENOSYS;
-}
-
 /* Alloc channel resources */
 static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -648,18 +628,6 @@ EXPORT_SYMBOL(sirfsoc_dma_filter_id);
 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
 	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

-static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
-	struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-
-	return 0;
-}
-
 static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
 	struct of_dma *ofdma)
 {
@@ -739,11 +707,16 @@ static int sirfsoc_dma_probe(struct platform_device *op)
 	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
 	dma->device_issue_pending = sirfsoc_dma_issue_pending;
-	dma->device_control = sirfsoc_dma_control;
+	dma->device_config = sirfsoc_dma_slave_config;
+	dma->device_pause = sirfsoc_dma_pause_chan;
+	dma->device_resume = sirfsoc_dma_resume_chan;
+	dma->device_terminate_all = sirfsoc_dma_terminate_all;
 	dma->device_tx_status = sirfsoc_dma_tx_status;
 	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
 	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
-	dma->device_slave_caps = sirfsoc_dma_device_slave_caps;
+	dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+	dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

 	INIT_LIST_HEAD(&dma->channels);
 	dma_cap_set(DMA_SLAVE, dma->cap_mask);

+ 30 - 33
drivers/dma/ste_dma40.c

@@ -1429,11 +1429,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
 	return is_link;
 }

-static int d40_pause(struct d40_chan *d40c)
+static int d40_pause(struct dma_chan *chan)
 {
+	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 	int res = 0;
 	unsigned long flags;

+	if (d40c->phy_chan == NULL) {
+		chan_err(d40c, "Channel is not allocated!\n");
+		return -EINVAL;
+	}
+
 	if (!d40c->busy)
 		return 0;

@@ -1448,11 +1454,17 @@ static int d40_pause(struct d40_chan *d40c)
 	return res;
 }

-static int d40_resume(struct d40_chan *d40c)
+static int d40_resume(struct dma_chan *chan)
 {
+	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 	int res = 0;
 	unsigned long flags;

+	if (d40c->phy_chan == NULL) {
+		chan_err(d40c, "Channel is not allocated!\n");
+		return -EINVAL;
+	}
+
 	if (!d40c->busy)
 		return 0;

@@ -2604,12 +2616,17 @@ static void d40_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&d40c->lock, flags);
 }

-static void d40_terminate_all(struct dma_chan *chan)
+static int d40_terminate_all(struct dma_chan *chan)
 {
 	unsigned long flags;
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 	int ret;

+	if (d40c->phy_chan == NULL) {
+		chan_err(d40c, "Channel is not allocated!\n");
+		return -EINVAL;
+	}
+
 	spin_lock_irqsave(&d40c->lock, flags);

 	pm_runtime_get_sync(d40c->base->dev);
@@ -2627,6 +2644,7 @@ static void d40_terminate_all(struct dma_chan *chan)
 	d40c->busy = false;

 	spin_unlock_irqrestore(&d40c->lock, flags);
+	return 0;
 }

 static int
@@ -2673,6 +2691,11 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	u32 src_maxburst, dst_maxburst;
 	int ret;

+	if (d40c->phy_chan == NULL) {
+		chan_err(d40c, "Channel is not allocated!\n");
+		return -EINVAL;
+	}
+
 	src_addr_width = config->src_addr_width;
 	src_maxburst = config->src_maxburst;
 	dst_addr_width = config->dst_addr_width;
@@ -2781,35 +2804,6 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	return 0;
 }

-static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		       unsigned long arg)
-{
-	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
-
-	if (d40c->phy_chan == NULL) {
-		chan_err(d40c, "Channel is not allocated!\n");
-		return -EINVAL;
-	}
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		d40_terminate_all(chan);
-		return 0;
-	case DMA_PAUSE:
-		return d40_pause(d40c);
-	case DMA_RESUME:
-		return d40_resume(d40c);
-	case DMA_SLAVE_CONFIG:
-		return d40_set_runtime_config(chan,
-			(struct dma_slave_config *) arg);
-	default:
-		break;
-	}
-
-	/* Other commands are unimplemented */
-	return -ENXIO;
-}
-
 /* Initialization functions */

 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
@@ -2870,7 +2864,10 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
 	dev->device_free_chan_resources = d40_free_chan_resources;
 	dev->device_issue_pending = d40_issue_pending;
 	dev->device_tx_status = d40_tx_status;
-	dev->device_control = d40_control;
+	dev->device_config = d40_set_runtime_config;
+	dev->device_pause = d40_pause;
+	dev->device_resume = d40_resume;
+	dev->device_terminate_all = d40_terminate_all;
 	dev->dev = base->dev;
 }


+ 87 - 73
drivers/dma/sun6i-dma.c

@@ -355,38 +355,6 @@ static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
 	kfree(txd);
 }

-static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan)
-{
-	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
-	struct sun6i_pchan *pchan = vchan->phy;
-	unsigned long flags;
-	LIST_HEAD(head);
-
-	spin_lock(&sdev->lock);
-	list_del_init(&vchan->node);
-	spin_unlock(&sdev->lock);
-
-	spin_lock_irqsave(&vchan->vc.lock, flags);
-
-	vchan_get_all_descriptors(&vchan->vc, &head);
-
-	if (pchan) {
-		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
-		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
-
-		vchan->phy = NULL;
-		pchan->vchan = NULL;
-		pchan->desc = NULL;
-		pchan->done = NULL;
-	}
-
-	spin_unlock_irqrestore(&vchan->vc.lock, flags);
-
-	vchan_dma_desc_free_list(&vchan->vc, &head);
-
-	return 0;
-}
-
 static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
 {
 	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
@@ -675,57 +643,92 @@ err_lli_free:
 	return NULL;
 }

-static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		       unsigned long arg)
+static int sun6i_dma_config(struct dma_chan *chan,
+			    struct dma_slave_config *config)
+{
+	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+
+	memcpy(&vchan->cfg, config, sizeof(*config));
+
+	return 0;
+}
+
+static int sun6i_dma_pause(struct dma_chan *chan)
+{
+	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+	struct sun6i_pchan *pchan = vchan->phy;
+
+	dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
+
+	if (pchan) {
+		writel(DMA_CHAN_PAUSE_PAUSE,
+		       pchan->base + DMA_CHAN_PAUSE);
+	} else {
+		spin_lock(&sdev->lock);
+		list_del_init(&vchan->node);
+		spin_unlock(&sdev->lock);
+	}
+
+	return 0;
+}
+
+static int sun6i_dma_resume(struct dma_chan *chan)
 {
 	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
 	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
 	struct sun6i_pchan *pchan = vchan->phy;
 	unsigned long flags;
-	int ret = 0;

-	switch (cmd) {
-	case DMA_RESUME:
-		dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

-		spin_lock_irqsave(&vchan->vc.lock, flags);
+	spin_lock_irqsave(&vchan->vc.lock, flags);

-		if (pchan) {
-			writel(DMA_CHAN_PAUSE_RESUME,
-			       pchan->base + DMA_CHAN_PAUSE);
-		} else if (!list_empty(&vchan->vc.desc_issued)) {
-			spin_lock(&sdev->lock);
-			list_add_tail(&vchan->node, &sdev->pending);
-			spin_unlock(&sdev->lock);
-		}
+	if (pchan) {
+		writel(DMA_CHAN_PAUSE_RESUME,
+		       pchan->base + DMA_CHAN_PAUSE);
+	} else if (!list_empty(&vchan->vc.desc_issued)) {
+		spin_lock(&sdev->lock);
+		list_add_tail(&vchan->node, &sdev->pending);
+		spin_unlock(&sdev->lock);
+	}

-		spin_unlock_irqrestore(&vchan->vc.lock, flags);
-		break;
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);

-	case DMA_PAUSE:
-		dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
+	return 0;
+}

-		if (pchan) {
-			writel(DMA_CHAN_PAUSE_PAUSE,
-			       pchan->base + DMA_CHAN_PAUSE);
-		} else {
-			spin_lock(&sdev->lock);
-			list_del_init(&vchan->node);
-			spin_unlock(&sdev->lock);
-		}
-		break;
-
-	case DMA_TERMINATE_ALL:
-		ret = sun6i_dma_terminate_all(vchan);
-		break;
-	case DMA_SLAVE_CONFIG:
-		memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config));
-		break;
-	default:
-		ret = -ENXIO;
-		break;
+static int sun6i_dma_terminate_all(struct dma_chan *chan)
+{
+	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+	struct sun6i_pchan *pchan = vchan->phy;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock(&sdev->lock);
+	list_del_init(&vchan->node);
+	spin_unlock(&sdev->lock);
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	vchan_get_all_descriptors(&vchan->vc, &head);
+
+	if (pchan) {
+		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
+		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
+
+		vchan->phy = NULL;
+		pchan->vchan = NULL;
+		pchan->desc = NULL;
+		pchan->done = NULL;
 	}
-	return ret;
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	vchan_dma_desc_free_list(&vchan->vc, &head);
+
+	return 0;
 }

 static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
@@ -960,9 +963,20 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 	sdc->slave.device_issue_pending		= sun6i_dma_issue_pending;
 	sdc->slave.device_prep_slave_sg		= sun6i_dma_prep_slave_sg;
 	sdc->slave.device_prep_dma_memcpy	= sun6i_dma_prep_dma_memcpy;
-	sdc->slave.device_control		= sun6i_dma_control;
 	sdc->slave.copy_align			= 4;
-
+	sdc->slave.device_config		= sun6i_dma_config;
+	sdc->slave.device_pause			= sun6i_dma_pause;
+	sdc->slave.device_resume		= sun6i_dma_resume;
+	sdc->slave.device_terminate_all		= sun6i_dma_terminate_all;
+	sdc->slave.src_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	sdc->slave.dst_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	sdc->slave.directions			= BIT(DMA_DEV_TO_MEM) |
+						  BIT(DMA_MEM_TO_DEV);
+	sdc->slave.residue_granularity		= DMA_RESIDUE_GRANULARITY_BURST;
 	sdc->slave.dev = &pdev->dev;

 	sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,

+ 20 - 22
drivers/dma/tegra20-apb-dma.c

@@ -723,7 +723,7 @@ end:
 	return;
 }

-static void tegra_dma_terminate_all(struct dma_chan *dc)
+static int tegra_dma_terminate_all(struct dma_chan *dc)
 {
 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
 	struct tegra_dma_sg_req *sgreq;
@@ -736,7 +736,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
 	spin_lock_irqsave(&tdc->lock, flags);
 	if (list_empty(&tdc->pending_sg_req)) {
 		spin_unlock_irqrestore(&tdc->lock, flags);
-		return;
+		return 0;
 	}

 	if (!tdc->busy)
@@ -777,6 +777,7 @@ skip_dma_stop:
 		dma_desc->cb_count = 0;
 	}
 	spin_unlock_irqrestore(&tdc->lock, flags);
+	return 0;
 }

 static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
@@ -827,25 +828,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 	return ret;
 }

-static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
-			unsigned long arg)
-{
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		return tegra_dma_slave_config(dc,
-				(struct dma_slave_config *)arg);
-
-	case DMA_TERMINATE_ALL:
-		tegra_dma_terminate_all(dc);
-		return 0;
-
-	default:
-		break;
-	}
-
-	return -ENXIO;
-}
-
 static inline int get_bus_width(struct tegra_dma_channel *tdc,
 		enum dma_slave_buswidth slave_bw)
 {
@@ -1443,7 +1425,23 @@ static int tegra_dma_probe(struct platform_device *pdev)
 					tegra_dma_free_chan_resources;
 	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
 	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
-	tdma->dma_dev.device_control = tegra_dma_device_control;
+	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	/*
+	 * XXX The hardware appears to support
+	 * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
+	 * only used by this driver during tegra_dma_terminate_all()
+	 */
+	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	tdma->dma_dev.device_config = tegra_dma_slave_config;
+	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
 	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
 	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;


+ 2 - 6
drivers/dma/timb_dma.c

@@ -561,8 +561,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
 	return &td_desc->txd;
 }

-static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		      unsigned long arg)
+static int td_terminate_all(struct dma_chan *chan)
 {
 	struct timb_dma_chan *td_chan =
 		container_of(chan, struct timb_dma_chan, chan);
@@ -570,9 +569,6 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,

 	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
 	/* first the easy part, put the queue into the free list */
 	spin_lock_bh(&td_chan->lock);
 	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
@@ -697,7 +693,7 @@ static int td_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
 	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
 	td->dma.device_prep_slave_sg = td_prep_slave_sg;
-	td->dma.device_control = td_control;
+	td->dma.device_terminate_all = td_terminate_all;

 	td->dma.dev = &pdev->dev;


+ 2 - 7
drivers/dma/txx9dmac.c

@@ -901,17 +901,12 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return &first->txd;
 }

-static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			    unsigned long arg)
+static int txx9dmac_terminate_all(struct dma_chan *chan)
 {
 	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
 	struct txx9dmac_desc *desc, *_desc;
 	LIST_HEAD(list);

-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -EINVAL;
-
 	dev_vdbg(chan2dev(chan), "terminate_all\n");
 	spin_lock_bh(&dc->lock);

@@ -1109,7 +1104,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
 	dc->dma.dev = &pdev->dev;
 	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
 	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
-	dc->dma.device_control = txx9dmac_control;
+	dc->dma.device_terminate_all = txx9dmac_terminate_all;
 	dc->dma.device_tx_status = txx9dmac_tx_status;
 	dc->dma.device_issue_pending = txx9dmac_issue_pending;
 	if (pdata && pdata->memcpy_chan == ch) {

+ 6 - 23
drivers/dma/xilinx/xilinx_vdma.c

@@ -1001,13 +1001,17 @@ error:
  * xilinx_vdma_terminate_all - Halt the channel and free descriptors
  * @chan: Driver specific VDMA Channel pointer
  */
-static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan)
+static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
 {
+	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
 	/* Halt the DMA engine */
 	xilinx_vdma_halt(chan);

 	/* Remove and free all of the descriptors in the lists */
 	xilinx_vdma_free_descriptors(chan);
+
+	return 0;
 }

 /**
@@ -1075,27 +1079,6 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
 }
 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);

-/**
- * xilinx_vdma_device_control - Configure DMA channel of the device
- * @dchan: DMA Channel pointer
- * @cmd: DMA control command
- * @arg: Channel configuration
- *
- * Return: '0' on success and failure value on error
- */
-static int xilinx_vdma_device_control(struct dma_chan *dchan,
-				      enum dma_ctrl_cmd cmd, unsigned long arg)
-{
-	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
-
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
-	xilinx_vdma_terminate_all(chan);
-
-	return 0;
-}
-
 /* -----------------------------------------------------------------------------
  * Probe and remove
  */
@@ -1300,7 +1283,7 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
 				xilinx_vdma_free_chan_resources;
 	xdev->common.device_prep_interleaved_dma =
 				xilinx_vdma_dma_prep_interleaved;
-	xdev->common.device_control = xilinx_vdma_device_control;
+	xdev->common.device_terminate_all = xilinx_vdma_terminate_all;
 	xdev->common.device_tx_status = xilinx_vdma_tx_status;
 	xdev->common.device_issue_pending = xilinx_vdma_issue_pending;


+ 2 - 6
drivers/rapidio/devices/tsi721_dma.c

@@ -815,8 +815,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 	return txd;
 }

-static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-			     unsigned long arg)
+static int tsi721_terminate_all(struct dma_chan *dchan)
 {
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
 	struct tsi721_tx_desc *desc, *_d;
@@ -825,9 +824,6 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,

 	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENOSYS;
-
 	spin_lock_bh(&bdma_chan->lock);

 	bdma_chan->active = false;
@@ -901,7 +897,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
 	mport->dma.device_tx_status = tsi721_tx_status;
 	mport->dma.device_issue_pending = tsi721_issue_pending;
 	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
-	mport->dma.device_control = tsi721_device_control;
+	mport->dma.device_terminate_all = tsi721_terminate_all;

 	err = dma_async_device_register(&mport->dma);
 	if (err)

+ 58 - 62
include/linux/dmaengine.h

@@ -188,25 +188,6 @@ enum dma_ctrl_flags {
 	DMA_PREP_FENCE = (1 << 5),
 };

-/**
- * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
- * on a running channel.
- * @DMA_TERMINATE_ALL: terminate all ongoing transfers
- * @DMA_PAUSE: pause ongoing transfers
- * @DMA_RESUME: resume paused transfer
- * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
- * that need to runtime reconfigure the slave channels (as opposed to passing
- * configuration data in statically from the platform). An additional
- * argument of struct dma_slave_config must be passed in with this
- * command.
- */
-enum dma_ctrl_cmd {
-	DMA_TERMINATE_ALL,
-	DMA_PAUSE,
-	DMA_RESUME,
-	DMA_SLAVE_CONFIG,
-};
-
 /**
  * enum sum_check_bits - bit position of pq_check_flags
  */
@@ -298,6 +279,9 @@ enum dma_slave_buswidth {
 	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
 	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
 	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
+	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
+	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
 };

 /**
@@ -336,9 +320,8 @@ enum dma_slave_buswidth {
  * This struct is passed in as configuration data to a DMA engine
  * in order to set up a certain channel for DMA transport at runtime.
  * The DMA device/engine has to provide support for an additional
- * command in the channel config interface, DMA_SLAVE_CONFIG
- * and this struct will then be passed in as an argument to the
- * DMA engine device_control() function.
+ * callback in the dma_device structure, device_config and this struct
+ * will then be passed in as an argument to the function.
  *
  * The rationale for adding configuration information to this struct is as
  * follows: if it is likely that more than one DMA slave controllers in
@@ -387,7 +370,7 @@ enum dma_residue_granularity {
 /* struct dma_slave_caps - expose capabilities of a slave channel only
  *
  * @src_addr_widths: bit mask of src addr widths the channel supports
- * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+ * @dst_addr_widths: bit mask of dst addr widths the channel supports
  * @directions: bit mask of slave direction the channel supported
  * 	since the enum dma_transfer_direction is not defined as bits for each
  * 	type of direction, the dma controller should fill (1 << <TYPE>) and same
@@ -398,7 +381,7 @@ enum dma_residue_granularity {
  */
 struct dma_slave_caps {
 	u32 src_addr_widths;
-	u32 dstn_addr_widths;
+	u32 dst_addr_widths;
 	u32 directions;
 	bool cmd_pause;
 	bool cmd_terminate;
@@ -594,6 +577,14 @@ struct dma_tx_state {
  * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
+ * @src_addr_widths: bit mask of src addr widths the device supports
+ * @dst_addr_widths: bit mask of dst addr widths the device supports
+ * @directions: bit mask of slave direction the device supports since
+ * 	the enum dma_transfer_direction is not defined as bits for
+ * 	each type of direction, the dma controller should fill (1 <<
+ * 	<TYPE>) and same should be checked by controller as well
+ * @residue_granularity: granularity of the transfer residue reported
+ *	by tx_status
  * @device_alloc_chan_resources: allocate resources and return the
  *	number of allocated descriptors
  * @device_free_chan_resources: release DMA channel's resources
@@ -608,14 +599,19 @@ struct dma_tx_state {
  *	The function takes a buffer of size buf_len. The callback function will
  *	be called after period_len bytes have been transferred.
  * @device_prep_interleaved_dma: Transfer expression in a generic way.
- * @device_control: manipulate all pending operations on a channel, returns
- *	zero or error code
+ * @device_config: Pushes a new configuration to a channel, return 0 or an error
+ *	code
+ * @device_pause: Pauses any transfer happening on a channel. Returns
+ *	0 or an error code
+ * @device_resume: Resumes any transfer on a channel previously
+ *	paused. Returns 0 or an error code
+ * @device_terminate_all: Aborts all transfers on a channel. Returns 0
+ *	or an error code
  * @device_tx_status: poll for transaction completion, the optional
  *	txstate parameter can be supplied with a pointer to get a
  *	struct with auxiliary transfer status information, otherwise the call
  *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
- * @device_slave_caps: return the slave channel capabilities
  */
 struct dma_device {

@@ -635,14 +631,19 @@ struct dma_device {
 	int dev_id;
 	struct device *dev;

+	u32 src_addr_widths;
+	u32 dst_addr_widths;
+	u32 directions;
+	enum dma_residue_granularity residue_granularity;
+
 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);

 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
-		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 		size_t len, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
-		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 		unsigned int src_cnt, size_t len, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
 		struct dma_chan *chan, dma_addr_t *src,	unsigned int src_cnt,
@@ -674,31 +675,26 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
 		struct dma_chan *chan, struct dma_interleaved_template *xt,
 		unsigned long flags);
-	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg);
+
+	int (*device_config)(struct dma_chan *chan,
+			     struct dma_slave_config *config);
+	int (*device_pause)(struct dma_chan *chan);
+	int (*device_resume)(struct dma_chan *chan);
+	int (*device_terminate_all)(struct dma_chan *chan);

 	enum dma_status (*device_tx_status)(struct dma_chan *chan,
 					    dma_cookie_t cookie,
 					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
-	int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
 };

-static inline int dmaengine_device_control(struct dma_chan *chan,
-					   enum dma_ctrl_cmd cmd,
-					   unsigned long arg)
-{
-	if (chan->device->device_control)
-		return chan->device->device_control(chan, cmd, arg);
-
-	return -ENOSYS;
-}
-
 static inline int dmaengine_slave_config(struct dma_chan *chan,
 					  struct dma_slave_config *config)
 {
-	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
-			(unsigned long)config);
+	if (chan->device->device_config)
+		return chan->device->device_config(chan, config);
+
+	return -ENOSYS;
 }

 static inline bool is_slave_direction(enum dma_transfer_direction direction)
@@ -765,34 +761,28 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
 			src_sg, src_nents, flags);
 }

-static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
-{
-	if (!chan || !caps)
-		return -EINVAL;
-
-	/* check if the channel supports slave transactions */
-	if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
-		return -ENXIO;
-
-	if (chan->device->device_slave_caps)
-		return chan->device->device_slave_caps(chan, caps);
-
-	return -ENXIO;
-}
-
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
-	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	if (chan->device->device_terminate_all)
+		return chan->device->device_terminate_all(chan);
+
+	return -ENOSYS;
 }

 static inline int dmaengine_pause(struct dma_chan *chan)
 {
-	return dmaengine_device_control(chan, DMA_PAUSE, 0);
+	if (chan->device->device_pause)
+		return chan->device->device_pause(chan);
+
+	return -ENOSYS;
 }

 static inline int dmaengine_resume(struct dma_chan *chan)
 {
-	return dmaengine_device_control(chan, DMA_RESUME, 0);
+	if (chan->device->device_resume)
+		return chan->device->device_resume(chan);
+
+	return -ENOSYS;
 }

 static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
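[Editor's note] Illustration only; demo_stop_or_pause() is not in the tree. Because each operation is now a separate callback, the helpers above return -ENOSYS when a provider leaves one unimplemented, so a consumer can probe for pause support at run time:

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int demo_stop_or_pause(struct dma_chan *chan)
{
	int ret = dmaengine_pause(chan);	/* device_pause, if provided */

	if (ret == -ENOSYS)			/* no pause callback: stop instead */
		ret = dmaengine_terminate_all(chan);

	return ret;
}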
@@ -1059,6 +1049,7 @@ struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
 						  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
 #else
 static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
@@ -1093,6 +1084,11 @@ static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
+static inline int dma_get_slave_caps(struct dma_chan *chan,
+				     struct dma_slave_caps *caps)
+{
+	return -ENXIO;
+}
 #endif

 /* --- DMA device --- */

+ 4 - 2
include/linux/platform_data/dma-dw.h

@@ -13,10 +13,12 @@

 #include <linux/device.h>

+#define DW_DMA_MAX_NR_MASTERS	4
+
 /**
  * struct dw_dma_slave - Controller-specific information about a slave
  *
- * @dma_dev: required DMA master device. Depricated.
+ * @dma_dev:	required DMA master device
  * @src_id:	src request line
  * @dst_id:	dst request line
  * @src_master: src master for transfers on allocated channel.
@@ -53,7 +55,7 @@ struct dw_dma_platform_data {
 	unsigned char	chan_priority;
 	unsigned short	block_size;
 	unsigned char	nr_masters;
-	unsigned char	data_width[4];
+	unsigned char	data_width[DW_DMA_MAX_NR_MASTERS];
 };

 #endif /* _PLATFORM_DATA_DMA_DW_H */

+ 7 - 0
include/linux/platform_data/dma-mmp_tdma.h

@@ -28,6 +28,13 @@ struct sram_platdata {
 	int granularity;
 };

+#ifdef CONFIG_ARM
 extern struct gen_pool *sram_get_gpool(char *pool_name);
+#else
+static inline struct gen_pool *sram_get_gpool(char *pool_name)
+{
+	return NULL;
+}
+#endif

 #endif /* __DMA_MMP_TDMA_H */

+ 1 - 1
sound/soc/soc-generic-dmaengine-pcm.c

@@ -151,7 +151,7 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea
 			hw.info |= SNDRV_PCM_INFO_BATCH;

 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-			addr_widths = dma_caps.dstn_addr_widths;
+			addr_widths = dma_caps.dst_addr_widths;
 		else
 			addr_widths = dma_caps.src_addr_widths;
 	}
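
[Editor's note] The ASoC hunk above is the consumer-side counterpart of the dstn_addr_widths rename; below is a hedged sketch of the same capability query. demo_supports_32bit_writes() is invented, while dma_get_slave_caps() and the dma_slave_caps fields are the ones touched by this series.

#include <linux/bitops.h>
#include <linux/dmaengine.h>

static bool demo_supports_32bit_writes(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	/* Filled by the core from the dma_device capability fields added above. */
	if (dma_get_slave_caps(chan, &caps))
		return false;

	return (caps.directions & BIT(DMA_MEM_TO_DEV)) &&
	       (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
}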