|
@@ -594,6 +594,14 @@ struct dma_tx_state {
|
|
* @fill_align: alignment shift for memset operations
|
|
* @fill_align: alignment shift for memset operations
|
|
* @dev_id: unique device ID
|
|
* @dev_id: unique device ID
|
|
* @dev: struct device reference for dma mapping api
|
|
* @dev: struct device reference for dma mapping api
|
|
|
|
+ * @src_addr_widths: bit mask of src addr widths the device supports
+ * @dst_addr_widths: bit mask of dst addr widths the device supports
+ * @directions: bit mask of slave direction the device supports since
+ *	the enum dma_transfer_direction is not defined as bits for
+ *	each type of direction, the dma controller should fill (1 <<
+ *	<TYPE>) and same should be checked by controller as well
+ * @residue_granularity: granularity of the transfer residue reported
+ *	by tx_status
|
|
* @device_alloc_chan_resources: allocate resources and return the
|
|
* @device_alloc_chan_resources: allocate resources and return the
|
|
* number of allocated descriptors
|
|
* number of allocated descriptors
|
|
* @device_free_chan_resources: release DMA channel's resources
|
|
* @device_free_chan_resources: release DMA channel's resources
|
|
@@ -643,6 +651,11 @@ struct dma_device {
|
|
int dev_id;
|
|
int dev_id;
|
|
struct device *dev;
|
|
struct device *dev;
|
|
|
|
|
|
|
|
+ u32 src_addr_widths;
|
|
|
|
+ u32 dst_addr_widths;
|
|
|
|
+ u32 directions;
|
|
|
|
+ enum dma_residue_granularity residue_granularity;
|
|
|
|
+
|
|
int (*device_alloc_chan_resources)(struct dma_chan *chan);
|
|
int (*device_alloc_chan_resources)(struct dma_chan *chan);
|
|
void (*device_free_chan_resources)(struct dma_chan *chan);
|
|
void (*device_free_chan_resources)(struct dma_chan *chan);
|
|
|
|
|
|
@@ -784,17 +797,37 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
|
|
|
|
|
|
static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
|
|
static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
|
|
{
|
|
{
|
|
|
|
+ struct dma_device *device;
|
|
|
|
+
|
|
if (!chan || !caps)
|
|
if (!chan || !caps)
|
|
return -EINVAL;
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
+ device = chan->device;
|
|
|
|
+
|
|
/* check if the channel supports slave transactions */
|
|
/* check if the channel supports slave transactions */
|
|
- if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
|
|
|
|
|
|
+ if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
|
|
|
|
+ return -ENXIO;
|
|
|
|
+
|
|
|
|
+ if (device->device_slave_caps)
|
|
|
|
+ return device->device_slave_caps(chan, caps);
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * Check whether it reports it uses the generic slave
|
|
|
|
+ * capabilities, if not, that means it doesn't support any
|
|
|
|
+ * kind of slave capabilities reporting.
|
|
|
|
+ */
|
|
|
|
+ if (!device->directions)
|
|
return -ENXIO;
|
|
return -ENXIO;
|
|
|
|
|
|
- if (chan->device->device_slave_caps)
|
|
|
|
- return chan->device->device_slave_caps(chan, caps);
|
|
|
|
|
|
+ caps->src_addr_widths = device->src_addr_widths;
|
|
|
|
+ caps->dst_addr_widths = device->dst_addr_widths;
|
|
|
|
+ caps->directions = device->directions;
|
|
|
|
+ caps->residue_granularity = device->residue_granularity;
|
|
|
|
+
|
|
|
|
+ caps->cmd_pause = !!device->device_pause;
|
|
|
|
+ caps->cmd_terminate = !!device->device_terminate_all;
|
|
|
|
|
|
- return -ENXIO;
|
|
|
|
|
|
+ return 0;
|
|
}
|
|
}
|
|
|
|
|
|
static inline int dmaengine_terminate_all(struct dma_chan *chan)
|
|
static inline int dmaengine_terminate_all(struct dma_chan *chan)
|