
dmaengine: ioatdma: PQ err descriptors should callback with err results

The error completion callback is missing from the error handler. There are
two reasons we have never hit this. On Xeon, because of the hardware error
workaround, the completion happens on a NULL descriptor, so we never do a
callback on the PQ descriptor. On Atom we have DWBES support, so the
callback has either already happened or we don't halt on error; that case
is already taken care of. But this code needs to be corrected for future
error handlers.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
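
Why the callback matters to clients: a dmaengine user typically sleeps until
its completion callback fires, so skipping the callback on an errored PQ
descriptor can leave the waiter stuck. Below is a minimal sketch of such a
waiter; pq_done, pq_callback() and submit_pq_and_wait() are hypothetical
names, not from this patch, while the dmaengine calls are the real client
API of this era.

/*
 * Hypothetical dmaengine client that sleeps until its completion
 * callback runs.  Before this fix, a faulted PQ descriptor handled
 * by ioat3_eh() could be retired without ever invoking
 * pq_callback(), leaving the waiter below stuck.
 */
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(pq_done);

static void pq_callback(void *param)
{
	complete(&pq_done);	/* with this fix, also runs on the error path */
}

static int submit_pq_and_wait(struct dma_chan *chan,
			      struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	reinit_completion(&pq_done);
	tx->callback = pq_callback;
	tx->callback_param = NULL;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	wait_for_completion(&pq_done);
	return 0;
}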
commit abf538ae03
Author: Dave Jiang <dave.jiang@intel.com>

 drivers/dma/ioat/dma_v3.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -489,6 +489,7 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
 	struct ioat_chan_common *chan = &ioat->base;
 	struct pci_dev *pdev = to_pdev(chan);
 	struct ioat_dma_descriptor *hw;
+	struct dma_async_tx_descriptor *tx;
 	u64 phys_complete;
 	struct ioat_ring_ent *desc;
 	u32 err_handled = 0;
@@ -534,6 +535,16 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
 		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
 			__func__, chanerr, err_handled);
 		BUG();
+	} else { /* cleanup the faulty descriptor */
+		tx = &desc->txd;
+		if (tx->cookie) {
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+		}
 	}
 
 	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
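
For reference, the cleanup added above mirrors the driver's normal
completion sequence; here it is restated with the rationale for each step
as comments. This is a sketch, not the driver's code: cleanup_faulty_desc()
is a hypothetical name, and dma_cookie_complete() comes from the
driver-internal drivers/dma/dmaengine.h header.

/*
 * Annotated restatement of the hunk above (hypothetical helper).
 */
static void cleanup_faulty_desc(struct dma_async_tx_descriptor *tx)
{
	if (!tx->cookie)
		return;			/* no client to notify */

	dma_cookie_complete(tx);	/* record the cookie as done for status queries */
	dma_descriptor_unmap(tx);	/* drop DMA mappings before the client reuses buffers */
	if (tx->callback) {
		tx->callback(tx->callback_param);
		tx->callback = NULL;	/* notify the client exactly once */
	}
}

Clearing tx->callback after invoking it prevents a second invocation if the
descriptor is traversed again by the regular cleanup path. The error itself
reaches the client through the operation's result (for pq_val operations,
the sum_check_flags pointer passed at prep time), not through the callback
arguments.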