
Merge branch 'topic/ioatdma' into for-linus

Vinod Koul 9 years ago
parent
commit
212dac5665
4 changed files with 152 additions and 5 deletions
  1. drivers/dma/ioat/dma.c (+2 -1)
  2. drivers/dma/ioat/dma.h (+4 -2)
  3. drivers/dma/ioat/init.c (+112 -2)
  4. drivers/dma/ioat/prep.c (+34 -0)

+ 2 - 1
drivers/dma/ioat/dma.c

@@ -197,7 +197,8 @@ static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
 	spin_lock_bh(&ioat_chan->prep_lock);
-	__ioat_start_null_desc(ioat_chan);
+	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		__ioat_start_null_desc(ioat_chan);
 	spin_unlock_bh(&ioat_chan->prep_lock);
 }
 

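The new test runs under the same prep_lock that the shutdown path (added in init.c below) holds while setting IOAT_CHAN_DOWN, so the two sides serialize and a null descriptor can never be started on a channel that is being quiesced. A hedged sketch of the two halves of that handshake, abridged from the hunks in this commit:

	/* writer side: quiesce (see ioat_shutdown() in init.c below) */
	spin_lock_bh(&ioat_chan->prep_lock);
	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);

	/* reader side: refuse to touch downed hardware (hunk above) */
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
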
+ 4 - 2
drivers/dma/ioat/dma.h

@@ -82,8 +82,9 @@ struct ioatdma_device {
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
 	struct dma_device dma_dev;
 	u8 version;
-	struct msix_entry msix_entries[4];
-	struct ioatdma_chan *idx[4];
+#define IOAT_MAX_CHANS 4
+	struct msix_entry msix_entries[IOAT_MAX_CHANS];
+	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
 	u32 cap;
@@ -95,6 +96,7 @@ struct ioatdma_chan {
 	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
+	#define IOAT_CHAN_DOWN 0
 	#define IOAT_COMPLETION_ACK 1
 	#define IOAT_RESET_PENDING 2
 	#define IOAT_KOBJ_INIT_FAIL 3

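IOAT_MAX_CHANS gives the previously magic bound of 4 a name, and IOAT_CHAN_DOWN claims bit 0, which the existing flags (starting at IOAT_COMPLETION_ACK = 1) had left free. A minimal sketch of a per-channel walk against the named bound, mirroring the loops added in init.c below (the loop body is a placeholder):

	int i;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		struct ioatdma_chan *ioat_chan = ioat_dma->idx[i];

		if (!ioat_chan)		/* idx[] may be sparsely populated */
			continue;
		/* per-channel work goes here */
	}
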
+ 112 - 2
drivers/dma/ioat/init.c

@@ -27,6 +27,7 @@
 #include <linux/workqueue.h>
 #include <linux/prefetch.h>
 #include <linux/dca.h>
+#include <linux/aer.h>
 #include "dma.h"
 #include "registers.h"
 #include "hw.h"
@@ -1186,13 +1187,116 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	return 0;
 }
 
+static void ioat_shutdown(struct pci_dev *pdev)
+{
+	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
+	struct ioatdma_chan *ioat_chan;
+	int i;
+
+	if (!ioat_dma)
+		return;
+
+	for (i = 0; i < IOAT_MAX_CHANS; i++) {
+		ioat_chan = ioat_dma->idx[i];
+		if (!ioat_chan)
+			continue;
+
+		spin_lock_bh(&ioat_chan->prep_lock);
+		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+		del_timer_sync(&ioat_chan->timer);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+		/* this should quiesce then reset */
+		ioat_reset_hw(ioat_chan);
+	}
+
+	ioat_disable_interrupts(ioat_dma);
+}
+
+void ioat_resume(struct ioatdma_device *ioat_dma)
+{
+	struct ioatdma_chan *ioat_chan;
+	u32 chanerr;
+	int i;
+
+	for (i = 0; i < IOAT_MAX_CHANS; i++) {
+		ioat_chan = ioat_dma->idx[i];
+		if (!ioat_chan)
+			continue;
+
+		spin_lock_bh(&ioat_chan->prep_lock);
+		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+
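+		/* CHANERR is write-1-to-clear; this read/write pair clears latched errors */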
+		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+
+		/* no need to reset as shutdown already did that */
+	}
+}
+
 #define DRV_NAME "ioatdma"
 
+static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
+						 enum pci_channel_state error)
+{
+	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);
+
+	/* quiesce and block I/O */
+	ioat_shutdown(pdev);
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
+{
+	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
+	int err;
+
+	dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);
+
+	if (pci_enable_device_mem(pdev) < 0) {
+		dev_err(&pdev->dev,
+			"Failed to enable PCIe device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+		pci_wake_from_d3(pdev, false);
+	}
+
+	err = pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"AER uncorrect error status clear failed: %#x\n", err);
+	}
+
+	return result;
+}
+
+static void ioat_pcie_error_resume(struct pci_dev *pdev)
+{
+	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);
+
+	/* initialize and bring everything back */
+	ioat_resume(ioat_dma);
+}
+
+static const struct pci_error_handlers ioat_err_handler = {
+	.error_detected = ioat_pcie_error_detected,
+	.slot_reset = ioat_pcie_error_slot_reset,
+	.resume = ioat_pcie_error_resume,
+};
+
 static struct pci_driver ioat_pci_driver = {
 	.name		= DRV_NAME,
 	.id_table	= ioat_pci_tbl,
 	.probe		= ioat_pci_probe,
 	.remove		= ioat_remove,
+	.shutdown	= ioat_shutdown,
+	.err_handler	= &ioat_err_handler,
 };
 
 static struct ioatdma_device *
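
For context, the PCI core drives these callbacks in a fixed order during AER recovery; a simplified outline of how this driver's handlers plug into that sequence (illustrative, not the core's actual code):

	/*
	 * 1. ->error_detected(): ioat quiesces every channel via
	 *    ioat_shutdown() and asks for a reset (NEED_RESET)
	 * 2. the PCI core resets the slot/link
	 * 3. ->slot_reset(): re-enable the device, restore config space,
	 *    clear the uncorrectable AER status
	 * 4. ->resume(): ioat_resume() clears IOAT_CHAN_DOWN so the
	 *    prep callbacks start producing descriptors again
	 */
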
@@ -1245,13 +1349,17 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_set_drvdata(pdev, device);
 
 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-	if (device->version >= IOAT_VER_3_0)
+	if (device->version >= IOAT_VER_3_0) {
 		err = ioat3_dma_probe(device, ioat_dca_enabled);
-	else
+
+		if (device->version >= IOAT_VER_3_3)
+			pci_enable_pcie_error_reporting(pdev);
+	} else
 		return -ENODEV;
 
 	if (err) {
 		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+		pci_disable_pcie_error_reporting(pdev);
 		return -ENODEV;
 	}
 
@@ -1271,6 +1379,8 @@ static void ioat_remove(struct pci_dev *pdev)
 		free_dca_provider(device->dca);
 		device->dca = NULL;
 	}
+
+	pci_disable_pcie_error_reporting(pdev);
 	ioat_dma_remove(device);
 }
 

+ 34 - 0
drivers/dma/ioat/prep.c

@@ -121,6 +121,9 @@ ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 	size_t total_len = len;
 	int num_descs, idx, i;
 
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
 	if (likely(num_descs) &&
 	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
@@ -254,6 +257,11 @@ struct dma_async_tx_descriptor *
 ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	       unsigned int src_cnt, size_t len, unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
 }
 
@@ -262,6 +270,11 @@ ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
 		    unsigned int src_cnt, size_t len,
 		    enum sum_check_flags *result, unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	/* the cleanup routine only sets bits on validate failure, it
 	 * does not clear bits on validate success... so clear it here
 	 */
@@ -574,6 +587,11 @@ ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	      unsigned int src_cnt, const unsigned char *scf, size_t len,
 	      unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	/* specify valid address for disabled result */
 	if (flags & DMA_PREP_PQ_DISABLE_P)
 		dst[0] = dst[1];
@@ -614,6 +632,11 @@ ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		  unsigned int src_cnt, const unsigned char *scf, size_t len,
 		  enum sum_check_flags *pqres, unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	/* specify valid address for disabled result */
 	if (flags & DMA_PREP_PQ_DISABLE_P)
 		pq[0] = pq[1];
@@ -638,6 +661,10 @@ ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 {
 	unsigned char scf[MAX_SCF];
 	dma_addr_t pq[2];
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
 
 	if (src_cnt > MAX_SCF)
 		return NULL;
@@ -661,6 +688,10 @@ ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 {
 	unsigned char scf[MAX_SCF];
 	dma_addr_t pq[2];
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
 
 	if (src_cnt > MAX_SCF)
 		return NULL;
@@ -689,6 +720,9 @@ ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
 	struct ioat_ring_ent *desc;
 	struct ioat_dma_descriptor *hw;
 
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	if (ioat_check_space_lock(ioat_chan, 1) == 0)
 		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
 	else
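
With these guards in place, every device_prep_* callback returns NULL while a channel is marked down, so dmaengine clients must treat a NULL descriptor as a transient failure rather than only as ring exhaustion. A minimal, hedged client-side sketch (dmaengine_prep_dma_memcpy() is the standard dmaengine wrapper; cpu_copy_fallback() and the addresses are hypothetical placeholders):

	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		/* channel down or no ring space: retry later or fall back */
		cpu_copy_fallback(dst, src, len);
		return;
	}
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);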