@@ -923,28 +923,85 @@ int dma_async_device_register(struct dma_device *device)
 		return -ENODEV;
 
 	/* validate device routines */
-	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
-	       !device->device_prep_dma_memcpy);
-	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
-	       !device->device_prep_dma_xor);
-	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
-	       !device->device_prep_dma_xor_val);
-	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
-	       !device->device_prep_dma_pq);
-	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
-	       !device->device_prep_dma_pq_val);
-	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
-	       !device->device_prep_dma_memset);
-	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
-	       !device->device_prep_dma_interrupt);
-	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
-	       !device->device_prep_dma_cyclic);
-	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
-	       !device->device_prep_interleaved_dma);
-
-	BUG_ON(!device->device_tx_status);
-	BUG_ON(!device->device_issue_pending);
-	BUG_ON(!device->dev);
+	if (!device->dev) {
+		pr_err("DMA device must have dev\n");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMCPY");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMSET");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERRUPT");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_CYCLIC");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERLEAVE");
+		return -EIO;
+	}
+
+
+	if (!device->device_tx_status) {
+		dev_err(device->dev, "Device tx_status is not defined\n");
+		return -EIO;
+	}
+
+
+	if (!device->device_issue_pending) {
+		dev_err(device->dev, "Device issue_pending is not defined\n");
+		return -EIO;
+	}
 
 	/* note: this only matters in the
 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case