|
@@ -99,7 +99,9 @@
|
|
|
#define XILINX_DMA_REG_FRMPTR_STS 0x0024
|
|
|
#define XILINX_DMA_REG_PARK_PTR 0x0028
|
|
|
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
|
|
|
+#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
|
|
|
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
|
|
|
+#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
|
|
|
#define XILINX_DMA_REG_VDMA_VERSION 0x002c
|
|
|
|
|
|
/* Register Direct Mode Registers */
|
|
@@ -163,6 +165,7 @@
|
|
|
#define XILINX_DMA_BD_SOP BIT(27)
|
|
|
#define XILINX_DMA_BD_EOP BIT(26)
|
|
|
#define XILINX_DMA_COALESCE_MAX 255
|
|
|
+#define XILINX_DMA_NUM_DESCS 255
|
|
|
#define XILINX_DMA_NUM_APP_WORDS 5
|
|
|
|
|
|
/* Multi-Channel DMA Descriptor offsets*/
|
|
@@ -211,8 +214,8 @@ struct xilinx_vdma_desc_hw {
|
|
|
* @next_desc_msb: MSB of Next Descriptor Pointer @0x04
|
|
|
* @buf_addr: Buffer address @0x08
|
|
|
* @buf_addr_msb: MSB of Buffer address @0x0C
|
|
|
- * @pad1: Reserved @0x10
|
|
|
- * @pad2: Reserved @0x14
|
|
|
+ * @mcdma_control: Control field for mcdma @0x10
|
|
|
+ * @vsize_stride: Vsize and Stride field for mcdma @0x14
|
|
|
* @control: Control field @0x18
|
|
|
* @status: Status field @0x1C
|
|
|
* @app: APP Fields @0x20 - 0x30
|
|
@@ -232,11 +235,11 @@ struct xilinx_axidma_desc_hw {
|
|
|
/**
|
|
|
* struct xilinx_cdma_desc_hw - Hardware Descriptor
|
|
|
* @next_desc: Next Descriptor Pointer @0x00
|
|
|
- * @next_descmsb: Next Descriptor Pointer MSB @0x04
|
|
|
+ * @next_desc_msb: Next Descriptor Pointer MSB @0x04
|
|
|
* @src_addr: Source address @0x08
|
|
|
- * @src_addrmsb: Source address MSB @0x0C
|
|
|
+ * @src_addr_msb: Source address MSB @0x0C
|
|
|
* @dest_addr: Destination address @0x10
|
|
|
- * @dest_addrmsb: Destination address MSB @0x14
|
|
|
+ * @dest_addr_msb: Destination address MSB @0x14
|
|
|
* @control: Control field @0x18
|
|
|
* @status: Status field @0x1C
|
|
|
*/
|
|
@@ -310,6 +313,7 @@ struct xilinx_dma_tx_descriptor {
|
|
|
* @pending_list: Descriptors waiting
|
|
|
* @active_list: Descriptors ready to submit
|
|
|
* @done_list: Complete descriptors
|
|
|
+ * @free_seg_list: Free descriptors
|
|
|
* @common: DMA common channel
|
|
|
* @desc_pool: Descriptors pool
|
|
|
* @dev: The dma device
|
|
@@ -321,6 +325,7 @@ struct xilinx_dma_tx_descriptor {
|
|
|
* @cyclic: Check for cyclic transfers.
|
|
|
* @genlock: Support genlock mode
|
|
|
* @err: Channel has errors
|
|
|
+ * @idle: Check for channel idle
|
|
|
* @tasklet: Cleanup work after irq
|
|
|
* @config: Device configuration info
|
|
|
* @flush_on_fsync: Flush on Frame sync
|
|
@@ -329,9 +334,12 @@ struct xilinx_dma_tx_descriptor {
|
|
|
* @desc_submitcount: Descriptor h/w submitted count
|
|
|
* @residue: Residue for AXI DMA
|
|
|
* @seg_v: Statically allocated segments base
|
|
|
+ * @seg_p: Physical address of the statically allocated segments base
|
|
|
* @cyclic_seg_v: Statically allocated segment base for cyclic transfers
|
|
|
+ * @cyclic_seg_p: Physical address of the statically allocated segment for cyclic DMA
|
|
|
* @start_transfer: Differentiate b/w DMA IP's transfer
|
|
|
* @stop_transfer: Differentiate b/w DMA IP's quiesce
|
|
|
+ * @tdest: TDEST value for mcdma
|
|
|
*/
|
|
|
struct xilinx_dma_chan {
|
|
|
struct xilinx_dma_device *xdev;
|
|
@@ -341,6 +349,7 @@ struct xilinx_dma_chan {
|
|
|
struct list_head pending_list;
|
|
|
struct list_head active_list;
|
|
|
struct list_head done_list;
|
|
|
+ struct list_head free_seg_list;
|
|
|
struct dma_chan common;
|
|
|
struct dma_pool *desc_pool;
|
|
|
struct device *dev;
|
|
@@ -352,6 +361,7 @@ struct xilinx_dma_chan {
|
|
|
bool cyclic;
|
|
|
bool genlock;
|
|
|
bool err;
|
|
|
+ bool idle;
|
|
|
struct tasklet_struct tasklet;
|
|
|
struct xilinx_vdma_config config;
|
|
|
bool flush_on_fsync;
|
|
@@ -360,18 +370,20 @@ struct xilinx_dma_chan {
|
|
|
u32 desc_submitcount;
|
|
|
u32 residue;
|
|
|
struct xilinx_axidma_tx_segment *seg_v;
|
|
|
+ dma_addr_t seg_p;
|
|
|
struct xilinx_axidma_tx_segment *cyclic_seg_v;
|
|
|
+ dma_addr_t cyclic_seg_p;
|
|
|
void (*start_transfer)(struct xilinx_dma_chan *chan);
|
|
|
int (*stop_transfer)(struct xilinx_dma_chan *chan);
|
|
|
u16 tdest;
|
|
|
};
|
|
|
|
|
|
/**
|
|
|
- * enum xdma_ip_type: DMA IP type.
|
|
|
+ * enum xdma_ip_type - DMA IP type.
|
|
|
*
|
|
|
- * XDMA_TYPE_AXIDMA: Axi dma ip.
|
|
|
- * XDMA_TYPE_CDMA: Axi cdma ip.
|
|
|
- * XDMA_TYPE_VDMA: Axi vdma ip.
|
|
|
+ * @XDMA_TYPE_AXIDMA: Axi dma ip.
|
|
|
+ * @XDMA_TYPE_CDMA: Axi cdma ip.
|
|
|
+ * @XDMA_TYPE_VDMA: Axi vdma ip.
|
|
|
*
|
|
|
*/
|
|
|
enum xdma_ip_type {
|
|
@@ -580,18 +592,32 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
|
|
|
static struct xilinx_axidma_tx_segment *
|
|
|
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
|
|
|
{
|
|
|
- struct xilinx_axidma_tx_segment *segment;
|
|
|
- dma_addr_t phys;
|
|
|
-
|
|
|
- segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
|
|
|
- if (!segment)
|
|
|
- return NULL;
|
|
|
+ struct xilinx_axidma_tx_segment *segment = NULL;
|
|
|
+ unsigned long flags;
|
|
|
|
|
|
- segment->phys = phys;
|
|
|
+ spin_lock_irqsave(&chan->lock, flags);
|
|
|
+ if (!list_empty(&chan->free_seg_list)) {
|
|
|
+ segment = list_first_entry(&chan->free_seg_list,
|
|
|
+ struct xilinx_axidma_tx_segment,
|
|
|
+ node);
|
|
|
+ list_del(&segment->node);
|
|
|
+ }
|
|
|
+ spin_unlock_irqrestore(&chan->lock, flags);
|
|
|
|
|
|
return segment;
|
|
|
}
|
|
|
|
|
|
+static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
|
|
|
+{
|
|
|
+ u32 next_desc = hw->next_desc;
|
|
|
+ u32 next_desc_msb = hw->next_desc_msb;
|
|
|
+
|
|
|
+ memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
|
|
|
+
|
|
|
+ hw->next_desc = next_desc;
|
|
|
+ hw->next_desc_msb = next_desc_msb;
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* xilinx_dma_free_tx_segment - Free transaction segment
|
|
|
* @chan: Driver specific DMA channel
|
|
@@ -600,7 +626,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
|
|
|
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
|
|
|
struct xilinx_axidma_tx_segment *segment)
|
|
|
{
|
|
|
- dma_pool_free(chan->desc_pool, segment, segment->phys);
|
|
|
+ xilinx_dma_clean_hw_desc(&segment->hw);
|
|
|
+
|
|
|
+ list_add_tail(&segment->node, &chan->free_seg_list);
|
|
|
}
|
|
|
|
|
|
/**
|
|
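The two hunks above move the AXI DMA segment allocator off dma_pool: segments are taken from the pre-allocated free_seg_list under the channel lock and returned to it on free, with xilinx_dma_clean_hw_desc() zeroing everything except the link words so the BD ring built at channel-allocation time stays intact. A minimal user-space sketch of that recycle pattern, with made-up structure and function names, might look like this:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define NUM_SEGS 4

struct seg_model {
	uint32_t next_desc;		/* ring link words, written once at init */
	uint32_t next_desc_msb;
	uint32_t buf_addr;		/* per-transfer fields */
	uint32_t control;
	struct seg_model *free_next;	/* stands in for the list_head */
};

static struct seg_model segs[NUM_SEGS];
static struct seg_model *free_list;

static struct seg_model *seg_alloc(void)
{
	struct seg_model *s = free_list;

	if (s)
		free_list = s->free_next;
	return s;			/* NULL once the fixed pool is used up */
}

static void seg_free(struct seg_model *s)
{
	uint32_t next = s->next_desc;
	uint32_t next_msb = s->next_desc_msb;

	memset(s, 0, sizeof(*s));	/* wipe the payload ... */
	s->next_desc = next;		/* ... but keep the ring links */
	s->next_desc_msb = next_msb;

	s->free_next = free_list;
	free_list = s;
}

int main(void)
{
	int i;

	for (i = NUM_SEGS - 1; i >= 0; i--) {
		segs[i].next_desc = 0x1000 + 0x40 * ((i + 1) % NUM_SEGS);
		seg_free(&segs[i]);	/* seed the free list */
	}

	struct seg_model *s = seg_alloc();

	s->buf_addr = 0xdeadbee0;	/* used for one transfer ... */
	seg_free(s);			/* ... then recycled with links intact */
	printf("next_desc=%#x buf_addr=%#x\n", s->next_desc, s->buf_addr);
	return 0;
}
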
@@ -725,16 +753,31 @@ static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
|
|
|
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
|
|
|
{
|
|
|
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
|
|
|
+ unsigned long flags;
|
|
|
|
|
|
dev_dbg(chan->dev, "Free all channel resources.\n");
|
|
|
|
|
|
xilinx_dma_free_descriptors(chan);
|
|
|
+
|
|
|
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
|
|
|
- xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
|
|
|
- xilinx_dma_free_tx_segment(chan, chan->seg_v);
|
|
|
+ spin_lock_irqsave(&chan->lock, flags);
|
|
|
+ INIT_LIST_HEAD(&chan->free_seg_list);
|
|
|
+ spin_unlock_irqrestore(&chan->lock, flags);
|
|
|
+
|
|
|
+ /* Free the memory allocated for the BDs */
|
|
|
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
|
|
|
+ XILINX_DMA_NUM_DESCS, chan->seg_v,
|
|
|
+ chan->seg_p);
|
|
|
+
|
|
|
+ /* Free the memory allocated for cyclic DMA mode */
|
|
|
+ dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
|
|
|
+ chan->cyclic_seg_v, chan->cyclic_seg_p);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
|
|
|
+ dma_pool_destroy(chan->desc_pool);
|
|
|
+ chan->desc_pool = NULL;
|
|
|
}
|
|
|
- dma_pool_destroy(chan->desc_pool);
|
|
|
- chan->desc_pool = NULL;
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -817,6 +860,7 @@ static void xilinx_dma_do_tasklet(unsigned long data)
|
|
|
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
|
|
|
{
|
|
|
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
|
|
|
+ int i;
|
|
|
|
|
|
/* Has this channel already been allocated? */
|
|
|
if (chan->desc_pool)
|
|
@@ -827,11 +871,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
|
|
|
* for meeting Xilinx VDMA specification requirement.
|
|
|
*/
|
|
|
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
|
|
|
- chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
|
|
|
- chan->dev,
|
|
|
- sizeof(struct xilinx_axidma_tx_segment),
|
|
|
- __alignof__(struct xilinx_axidma_tx_segment),
|
|
|
- 0);
|
|
|
+ /* Allocate the buffer descriptors. */
|
|
|
+ chan->seg_v = dma_zalloc_coherent(chan->dev,
|
|
|
+ sizeof(*chan->seg_v) *
|
|
|
+ XILINX_DMA_NUM_DESCS,
|
|
|
+ &chan->seg_p, GFP_KERNEL);
|
|
|
+ if (!chan->seg_v) {
|
|
|
+ dev_err(chan->dev,
|
|
|
+ "unable to allocate channel %d descriptors\n",
|
|
|
+ chan->id);
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+
|
|
|
+ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
|
|
|
+ chan->seg_v[i].hw.next_desc =
|
|
|
+ lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
|
|
|
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
|
|
|
+ chan->seg_v[i].hw.next_desc_msb =
|
|
|
+ upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
|
|
|
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
|
|
|
+ chan->seg_v[i].phys = chan->seg_p +
|
|
|
+ sizeof(*chan->seg_v) * i;
|
|
|
+ list_add_tail(&chan->seg_v[i].node,
|
|
|
+ &chan->free_seg_list);
|
|
|
+ }
|
|
|
} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
|
|
|
chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
|
|
|
chan->dev,
|
|
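The allocation loop above turns the coherent BD array into a ring: entry i links to entry (i + 1) % XILINX_DMA_NUM_DESCS, with the bus address split across next_desc and next_desc_msb. A stand-alone sketch of just that address arithmetic, using an invented base address and a trimmed-down descriptor layout:

#include <stdint.h>
#include <stdio.h>

#define NUM_DESCS 255

struct bd_model {
	uint32_t next_desc;	/* low 32 bits of the next BD's bus address */
	uint32_t next_desc_msb;	/* high 32 bits of the next BD's bus address */
};

int main(void)
{
	struct bd_model ring[NUM_DESCS];
	uint64_t base = 0x40000000ULL;	/* pretend dma_addr_t of entry 0 */
	int i;

	for (i = 0; i < NUM_DESCS; i++) {
		uint64_t next = base + sizeof(ring[0]) * ((i + 1) % NUM_DESCS);

		ring[i].next_desc = (uint32_t)next;
		ring[i].next_desc_msb = (uint32_t)(next >> 32);
	}

	/* The last entry wraps back to the first, closing the ring. */
	printf("bd[%d].next_desc = %#x (entry 0 is at %#llx)\n",
	       NUM_DESCS - 1, ring[NUM_DESCS - 1].next_desc,
	       (unsigned long long)base);
	return 0;
}
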
@@ -846,7 +909,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
|
|
|
0);
|
|
|
}
|
|
|
|
|
|
- if (!chan->desc_pool) {
|
|
|
+ if (!chan->desc_pool &&
|
|
|
+ (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
|
|
|
dev_err(chan->dev,
|
|
|
"unable to allocate channel %d descriptor pool\n",
|
|
|
chan->id);
|
|
@@ -854,23 +918,21 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
|
|
|
}
|
|
|
|
|
|
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
|
|
|
- /*
|
|
|
- * For AXI DMA case after submitting a pending_list, keep
|
|
|
- * an extra segment allocated so that the "next descriptor"
|
|
|
- * pointer on the tail descriptor always points to a
|
|
|
- * valid descriptor, even when paused after reaching taildesc.
|
|
|
- * This way, it is possible to issue additional
|
|
|
- * transfers without halting and restarting the channel.
|
|
|
- */
|
|
|
- chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
|
|
|
-
|
|
|
/*
|
|
|
* For cyclic DMA mode we need to program the tail Descriptor
|
|
|
* register with a value which is not a part of the BD chain
|
|
|
* so allocating a desc segment during channel allocation for
|
|
|
* programming tail descriptor.
|
|
|
*/
|
|
|
- chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
|
|
|
+ chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
|
|
|
+ sizeof(*chan->cyclic_seg_v),
|
|
|
+ &chan->cyclic_seg_p, GFP_KERNEL);
|
|
|
+ if (!chan->cyclic_seg_v) {
|
|
|
+ dev_err(chan->dev,
|
|
|
+ "unable to allocate desc segment for cyclic DMA\n");
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+ chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
|
|
|
}
|
|
|
|
|
|
dma_cookie_init(dchan);
|
|
@@ -935,35 +997,11 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * xilinx_dma_is_running - Check if DMA channel is running
|
|
|
- * @chan: Driver specific DMA channel
|
|
|
- *
|
|
|
- * Return: '1' if running, '0' if not.
|
|
|
- */
|
|
|
-static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
|
|
|
-{
|
|
|
- return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
|
|
|
- XILINX_DMA_DMASR_HALTED) &&
|
|
|
- (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
|
|
|
- XILINX_DMA_DMACR_RUNSTOP);
|
|
|
-}
|
|
|
-
|
|
|
-/**
|
|
|
- * xilinx_dma_is_idle - Check if DMA channel is idle
|
|
|
- * @chan: Driver specific DMA channel
|
|
|
- *
|
|
|
- * Return: '1' if idle, '0' if not.
|
|
|
- */
|
|
|
-static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
|
|
|
-{
|
|
|
- return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
|
|
|
- XILINX_DMA_DMASR_IDLE;
|
|
|
-}
|
|
|
-
|
|
|
/**
|
|
|
* xilinx_dma_stop_transfer - Halt DMA channel
|
|
|
* @chan: Driver specific DMA channel
|
|
|
+ *
|
|
|
+ * Return: '0' on success and failure value on error
|
|
|
*/
|
|
|
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
|
|
|
{
|
|
@@ -980,6 +1018,8 @@ static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
|
|
|
/**
|
|
|
* xilinx_cdma_stop_transfer - Wait for the current transfer to complete
|
|
|
* @chan: Driver specific DMA channel
|
|
|
+ *
|
|
|
+ * Return: '0' on success and failure value on error
|
|
|
*/
|
|
|
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
|
|
|
{
|
|
@@ -1022,13 +1062,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
{
|
|
|
struct xilinx_vdma_config *config = &chan->config;
|
|
|
struct xilinx_dma_tx_descriptor *desc, *tail_desc;
|
|
|
- u32 reg;
|
|
|
+ u32 reg, j;
|
|
|
struct xilinx_vdma_tx_segment *tail_segment;
|
|
|
|
|
|
/* This function was invoked with lock held */
|
|
|
if (chan->err)
|
|
|
return;
|
|
|
|
|
|
+ if (!chan->idle)
|
|
|
+ return;
|
|
|
+
|
|
|
if (list_empty(&chan->pending_list))
|
|
|
return;
|
|
|
|
|
@@ -1040,13 +1083,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
tail_segment = list_last_entry(&tail_desc->segments,
|
|
|
struct xilinx_vdma_tx_segment, node);
|
|
|
|
|
|
- /* If it is SG mode and hardware is busy, cannot submit */
|
|
|
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
|
|
|
- !xilinx_dma_is_idle(chan)) {
|
|
|
- dev_dbg(chan->dev, "DMA controller still busy\n");
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
/*
|
|
|
* If hardware is idle, then all descriptors on the running lists are
|
|
|
* done, start new transfers
|
|
@@ -1063,10 +1099,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
else
|
|
|
reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
|
|
|
|
|
|
- /* Configure channel to allow number frame buffers */
|
|
|
- dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
|
|
|
- chan->desc_pendingcount);
|
|
|
-
|
|
|
/*
|
|
|
* With SG, start with circular mode, so that BDs can be fetched.
|
|
|
* In direct register mode, if not parking, enable circular mode
|
|
@@ -1079,17 +1111,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
|
|
|
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
|
|
|
|
|
|
- if (config->park && (config->park_frm >= 0) &&
|
|
|
- (config->park_frm < chan->num_frms)) {
|
|
|
- if (chan->direction == DMA_MEM_TO_DEV)
|
|
|
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
|
|
|
- config->park_frm <<
|
|
|
- XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
|
|
|
- else
|
|
|
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
|
|
|
- config->park_frm <<
|
|
|
- XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
|
|
|
+ j = chan->desc_submitcount;
|
|
|
+ reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
|
|
|
+ if (chan->direction == DMA_MEM_TO_DEV) {
|
|
|
+ reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
|
|
|
+ reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
|
|
|
+ } else {
|
|
|
+ reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
|
|
|
+ reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
|
|
|
}
|
|
|
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
|
|
|
|
|
|
/* Start the hardware */
|
|
|
xilinx_dma_start(chan);
|
|
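The reworked park-pointer programming above is a read-modify-write: only the frame-reference field for the channel's own direction is cleared and rewritten, so whatever the other direction has parked in XILINX_DMA_REG_PARK_PTR is preserved. A small user-space sketch of that field update, with GENMASK() open-coded and the register modelled as a plain variable:

#include <stdint.h>
#include <stdio.h>

#define PARK_PTR_WR_REF_SHIFT	8
#define PARK_PTR_WR_REF_MASK	(0x1fu << PARK_PTR_WR_REF_SHIFT)	/* GENMASK(12, 8) */
#define PARK_PTR_RD_REF_SHIFT	0
#define PARK_PTR_RD_REF_MASK	(0x1fu << PARK_PTR_RD_REF_SHIFT)	/* GENMASK(4, 0) */

static uint32_t set_rd_ref(uint32_t reg, uint32_t frame)
{
	reg &= ~PARK_PTR_RD_REF_MASK;	/* clear only the read-side field */
	reg |= frame << PARK_PTR_RD_REF_SHIFT;
	return reg;
}

int main(void)
{
	uint32_t reg = 3u << PARK_PTR_WR_REF_SHIFT;	/* WR_REF already set to 3 */

	reg = set_rd_ref(reg, 2);	/* park the read side on frame 2 */
	printf("PARK_PTR = %#x\n", reg);	/* WR_REF stays 3, RD_REF becomes 2 */
	return 0;
}
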
@@ -1101,6 +1132,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
if (chan->has_sg) {
|
|
|
dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
|
|
|
tail_segment->phys);
|
|
|
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
|
|
|
+ chan->desc_pendingcount = 0;
|
|
|
} else {
|
|
|
struct xilinx_vdma_tx_segment *segment, *last = NULL;
|
|
|
int i = 0;
|
|
@@ -1130,19 +1163,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
|
|
|
last->hw.stride);
|
|
|
vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
|
|
|
- }
|
|
|
|
|
|
- if (!chan->has_sg) {
|
|
|
- list_del(&desc->node);
|
|
|
- list_add_tail(&desc->node, &chan->active_list);
|
|
|
chan->desc_submitcount++;
|
|
|
chan->desc_pendingcount--;
|
|
|
+ list_del(&desc->node);
|
|
|
+ list_add_tail(&desc->node, &chan->active_list);
|
|
|
if (chan->desc_submitcount == chan->num_frms)
|
|
|
chan->desc_submitcount = 0;
|
|
|
- } else {
|
|
|
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
|
|
|
- chan->desc_pendingcount = 0;
|
|
|
}
|
|
|
+
|
|
|
+ chan->idle = false;
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -1158,6 +1188,9 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
if (chan->err)
|
|
|
return;
|
|
|
|
|
|
+ if (!chan->idle)
|
|
|
+ return;
|
|
|
+
|
|
|
if (list_empty(&chan->pending_list))
|
|
|
return;
|
|
|
|
|
@@ -1176,6 +1209,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
}
|
|
|
|
|
|
if (chan->has_sg) {
|
|
|
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
|
|
|
+ XILINX_CDMA_CR_SGMODE);
|
|
|
+
|
|
|
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
|
|
|
+ XILINX_CDMA_CR_SGMODE);
|
|
|
+
|
|
|
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
|
|
|
head_desc->async_tx.phys);
|
|
|
|
|
@@ -1203,6 +1242,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
|
|
|
list_splice_tail_init(&chan->pending_list, &chan->active_list);
|
|
|
chan->desc_pendingcount = 0;
|
|
|
+ chan->idle = false;
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -1212,7 +1252,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
{
|
|
|
struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
|
|
|
- struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
|
|
|
+ struct xilinx_axidma_tx_segment *tail_segment;
|
|
|
u32 reg;
|
|
|
|
|
|
if (chan->err)
|
|
@@ -1221,12 +1261,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
if (list_empty(&chan->pending_list))
|
|
|
return;
|
|
|
|
|
|
- /* If it is SG mode and hardware is busy, cannot submit */
|
|
|
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
|
|
|
- !xilinx_dma_is_idle(chan)) {
|
|
|
- dev_dbg(chan->dev, "DMA controller still busy\n");
|
|
|
+ if (!chan->idle)
|
|
|
return;
|
|
|
- }
|
|
|
|
|
|
head_desc = list_first_entry(&chan->pending_list,
|
|
|
struct xilinx_dma_tx_descriptor, node);
|
|
@@ -1235,21 +1271,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
tail_segment = list_last_entry(&tail_desc->segments,
|
|
|
struct xilinx_axidma_tx_segment, node);
|
|
|
|
|
|
- if (chan->has_sg && !chan->xdev->mcdma) {
|
|
|
- old_head = list_first_entry(&head_desc->segments,
|
|
|
- struct xilinx_axidma_tx_segment, node);
|
|
|
- new_head = chan->seg_v;
|
|
|
- /* Copy Buffer Descriptor fields. */
|
|
|
- new_head->hw = old_head->hw;
|
|
|
-
|
|
|
- /* Swap and save new reserve */
|
|
|
- list_replace_init(&old_head->node, &new_head->node);
|
|
|
- chan->seg_v = old_head;
|
|
|
-
|
|
|
- tail_segment->hw.next_desc = chan->seg_v->phys;
|
|
|
- head_desc->async_tx.phys = new_head->phys;
|
|
|
- }
|
|
|
-
|
|
|
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
|
|
|
|
|
|
if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
|
|
@@ -1324,6 +1345,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
|
|
|
|
|
|
list_splice_tail_init(&chan->pending_list, &chan->active_list);
|
|
|
chan->desc_pendingcount = 0;
|
|
|
+ chan->idle = false;
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -1388,6 +1410,8 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
|
|
|
}
|
|
|
|
|
|
chan->err = false;
|
|
|
+ chan->idle = true;
|
|
|
+ chan->desc_submitcount = 0;
|
|
|
|
|
|
return err;
|
|
|
}
|
|
@@ -1469,6 +1493,7 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
|
|
|
if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
|
|
|
spin_lock(&chan->lock);
|
|
|
xilinx_dma_complete_descriptor(chan);
|
|
|
+ chan->idle = true;
|
|
|
chan->start_transfer(chan);
|
|
|
spin_unlock(&chan->lock);
|
|
|
}
|
|
@@ -1591,7 +1616,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
|
|
|
{
|
|
|
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
|
|
|
struct xilinx_dma_tx_descriptor *desc;
|
|
|
- struct xilinx_vdma_tx_segment *segment, *prev = NULL;
|
|
|
+ struct xilinx_vdma_tx_segment *segment;
|
|
|
struct xilinx_vdma_desc_hw *hw;
|
|
|
|
|
|
if (!is_slave_direction(xt->dir))
|
|
@@ -1645,8 +1670,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
|
|
|
/* Insert the segment into the descriptor segments list. */
|
|
|
list_add_tail(&segment->node, &desc->segments);
|
|
|
|
|
|
- prev = segment;
|
|
|
-
|
|
|
/* Link the last hardware descriptor with the first. */
|
|
|
segment = list_first_entry(&desc->segments,
|
|
|
struct xilinx_vdma_tx_segment, node);
|
|
@@ -1733,7 +1756,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
|
|
|
{
|
|
|
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
|
|
|
struct xilinx_dma_tx_descriptor *desc;
|
|
|
- struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
|
|
|
+ struct xilinx_axidma_tx_segment *segment = NULL;
|
|
|
u32 *app_w = (u32 *)context;
|
|
|
struct scatterlist *sg;
|
|
|
size_t copy;
|
|
@@ -1784,10 +1807,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
|
|
|
XILINX_DMA_NUM_APP_WORDS);
|
|
|
}
|
|
|
|
|
|
- if (prev)
|
|
|
- prev->hw.next_desc = segment->phys;
|
|
|
-
|
|
|
- prev = segment;
|
|
|
sg_used += copy;
|
|
|
|
|
|
/*
|
|
@@ -1801,7 +1820,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
|
|
|
segment = list_first_entry(&desc->segments,
|
|
|
struct xilinx_axidma_tx_segment, node);
|
|
|
desc->async_tx.phys = segment->phys;
|
|
|
- prev->hw.next_desc = segment->phys;
|
|
|
|
|
|
/* For the last DMA_MEM_TO_DEV transfer, set EOP */
|
|
|
if (chan->direction == DMA_MEM_TO_DEV) {
|
|
@@ -1821,11 +1839,14 @@ error:
|
|
|
|
|
|
/**
|
|
|
* xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
|
|
|
- * @chan: DMA channel
|
|
|
- * @sgl: scatterlist to transfer to/from
|
|
|
- * @sg_len: number of entries in @scatterlist
|
|
|
+ * @dchan: DMA channel
|
|
|
+ * @buf_addr: Physical address of the buffer
|
|
|
+ * @buf_len: Total length of the cyclic buffers
|
|
|
+ * @period_len: Length of an individual cyclic buffer
|
|
|
* @direction: DMA direction
|
|
|
* @flags: transfer ack flags
|
|
|
+ *
|
|
|
+ * Return: Async transaction descriptor on success and NULL on failure
|
|
|
*/
|
|
|
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
|
|
|
struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
|
|
@@ -2009,7 +2030,9 @@ error:
|
|
|
|
|
|
/**
|
|
|
* xilinx_dma_terminate_all - Halt the channel and free descriptors
|
|
|
- * @chan: Driver specific DMA Channel pointer
|
|
|
+ * @dchan: Driver specific DMA Channel pointer
|
|
|
+ *
|
|
|
+ * Return: '0' always.
|
|
|
*/
|
|
|
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
|
|
|
{
|
|
@@ -2029,6 +2052,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
|
|
|
|
|
|
/* Remove and free all of the descriptors in the lists */
|
|
|
xilinx_dma_free_descriptors(chan);
|
|
|
+ chan->idle = true;
|
|
|
|
|
|
if (chan->cyclic) {
|
|
|
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
|
|
@@ -2037,6 +2061,10 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
|
|
|
chan->cyclic = false;
|
|
|
}
|
|
|
|
|
|
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
|
|
|
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
|
|
|
+ XILINX_CDMA_CR_SGMODE);
|
|
|
+
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -2323,6 +2351,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
|
|
|
*
|
|
|
* @xdev: Driver specific device structure
|
|
|
* @node: Device node
|
|
|
+ * @chan_id: DMA Channel id
|
|
|
*
|
|
|
* Return: '0' on success and failure value on error
|
|
|
*/
|
|
@@ -2344,11 +2373,18 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
|
|
|
chan->has_sg = xdev->has_sg;
|
|
|
chan->desc_pendingcount = 0x0;
|
|
|
chan->ext_addr = xdev->ext_addr;
|
|
|
+ /* This variable ensures that descriptors are not
|
|
|
+ * submitted when the DMA engine is in progress. This variable is
|
|
|
+ * added to avoid polling for a bit in the status register to
|
|
|
+ * know the DMA state in the driver hot path.
|
|
|
+ */
|
|
|
+ chan->idle = true;
|
|
|
|
|
|
spin_lock_init(&chan->lock);
|
|
|
INIT_LIST_HEAD(&chan->pending_list);
|
|
|
INIT_LIST_HEAD(&chan->done_list);
|
|
|
INIT_LIST_HEAD(&chan->active_list);
|
|
|
+ INIT_LIST_HEAD(&chan->free_seg_list);
|
|
|
|
|
|
/* Retrieve the channel properties from the device tree */
|
|
|
has_dre = of_property_read_bool(node, "xlnx,include-dre");
|
|
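The comment added in the hunk above explains the new chan->idle handshake: the driver tracks in software whether the engine can accept another tail descriptor, instead of polling DMASR for the halted/idle bits on every submission. A stripped-down model of that handshake, with invented names, might look like:

#include <stdbool.h>
#include <stdio.h>

struct engine_model {
	bool idle;		/* mirrors chan->idle */
	int  pending;		/* descriptors waiting to be submitted */
	int  active;		/* descriptors handed to the hardware */
};

/* Called with the channel lock held in the real driver. */
static void start_transfer(struct engine_model *e)
{
	if (!e->idle || !e->pending)
		return;		/* engine busy or nothing to do */

	e->active += e->pending;
	e->pending = 0;
	e->idle = false;	/* busy until the completion interrupt */
}

static void completion_irq(struct engine_model *e)
{
	e->active = 0;
	e->idle = true;		/* safe to program a new tail descriptor */
	start_transfer(e);	/* kick anything queued meanwhile */
}

int main(void)
{
	struct engine_model e = { .idle = true, .pending = 2 };

	start_transfer(&e);	/* submits both, engine now busy */
	e.pending = 1;
	start_transfer(&e);	/* no-op: engine not idle yet */
	completion_irq(&e);	/* marks idle and submits the queued one */
	printf("idle=%d pending=%d active=%d\n", e.idle, e.pending, e.active);
	return 0;
}
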
@@ -2379,6 +2415,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
|
|
|
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
|
|
|
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
|
|
|
chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
|
|
|
+ chan->config.park = 1;
|
|
|
|
|
|
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
|
|
|
xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
|
|
@@ -2395,6 +2432,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
|
|
|
chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
|
|
|
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
|
|
|
chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
|
|
|
+ chan->config.park = 1;
|
|
|
|
|
|
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
|
|
|
xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
|
|
@@ -2459,7 +2497,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
|
|
|
* Return: 0 always.
|
|
|
*/
|
|
|
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
|
|
|
- struct device_node *node) {
|
|
|
+ struct device_node *node)
|
|
|
+{
|
|
|
int ret, i, nr_channels = 1;
|
|
|
|
|
|
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
|
|
@@ -2654,7 +2693,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
|
|
|
goto error;
|
|
|
}
|
|
|
|
|
|
- dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
|
|
|
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
|
|
|
+ dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
|
|
|
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
|
|
|
+ dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
|
|
|
+ else
|
|
|
+ dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
|
|
|
|
|
|
return 0;
|
|
|
|