@@ -1588,6 +1588,65 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
 	return ret;
 }
 
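+/*
+ * The firmware cannot see SRIOV VFs, so their config space has to be
+ * restored by the kernel after reset instead of via opal_pci_reinit().
+ */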
+static int pnv_eeh_restore_vf_config(struct pci_dn *pdn)
+{
+	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+	u32 devctl, cmd, cap2, aer_capctl;
+	int old_mps;
+
+	if (edev->pcie_cap) {
+		/* Restore MPS */
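+		/*
+		 * pdn->mps holds the payload size in bytes; convert it to
+		 * the Device Control encoding (128 << n bytes maps to n)
+		 * and place it in the PCI_EXP_DEVCTL_PAYLOAD field.
+		 */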
+		old_mps = (ffs(pdn->mps) - 8) << 5;
+		eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
+				     2, &devctl);
+		devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
+		devctl |= old_mps;
+		eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
+				      2, devctl);
+
+		/* Disable Completion Timeout */
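+		/*
+		 * Bit 0x10 of Device Capabilities 2 indicates that the
+		 * Completion Timeout can be disabled; the same bit in
+		 * Device Control 2 disables it.
+		 */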
+		eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2,
+				     4, &cap2);
+		if (cap2 & 0x10) {
+			eeh_ops->read_config(pdn,
+					     edev->pcie_cap + PCI_EXP_DEVCTL2,
+					     4, &cap2);
+			cap2 |= 0x10;
+			eeh_ops->write_config(pdn,
+					      edev->pcie_cap + PCI_EXP_DEVCTL2,
+					      4, cap2);
+		}
+	}
+
+	/* Enable SERR and parity checking */
+	eeh_ops->read_config(pdn, PCI_COMMAND, 2, &cmd);
+	cmd |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+	eeh_ops->write_config(pdn, PCI_COMMAND, 2, cmd);
+
+	/* Enable reporting of various errors */
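+	/*
+	 * Correctable error reporting is left disabled (CERE cleared);
+	 * non-fatal, fatal and unsupported request reporting are enabled.
+	 */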
+	if (edev->pcie_cap) {
+		eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
+				     2, &devctl);
+		devctl &= ~PCI_EXP_DEVCTL_CERE;
+		devctl |= (PCI_EXP_DEVCTL_NFERE |
+			   PCI_EXP_DEVCTL_FERE |
+			   PCI_EXP_DEVCTL_URRE);
+		eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
+				      2, devctl);
+	}
+
+	/* Enable ECRC generation and checking */
+	if (edev->pcie_cap && edev->aer_cap) {
+		eeh_ops->read_config(pdn, edev->aer_cap + PCI_ERR_CAP,
+				     4, &aer_capctl);
+		aer_capctl |= (PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
+		eeh_ops->write_config(pdn, edev->aer_cap + PCI_ERR_CAP,
+				      4, aer_capctl);
+	}
+
+	return 0;
+}
+
 static int pnv_eeh_restore_config(struct pci_dn *pdn)
 {
 	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
@@ -1597,9 +1656,21 @@ static int pnv_eeh_restore_config(struct pci_dn *pdn)
 	if (!edev)
 		return -EEXIST;
 
-	phb = edev->phb->private_data;
-	ret = opal_pci_reinit(phb->opal_id,
-			      OPAL_REINIT_PCI_DEV, edev->config_addr);
+	/*
+	 * We have to restore the PCI config space after reset since the
+	 * firmware can't see SRIOV VFs.
+	 *
+	 * FIXME: The MPS, error routing rules and timeout settings would
+	 * be worth exporting from firmware in an extensible way.
+	 */
+	if (edev->physfn) {
+		ret = pnv_eeh_restore_vf_config(pdn);
+	} else {
+		phb = edev->phb->private_data;
+		ret = opal_pci_reinit(phb->opal_id,
+				      OPAL_REINIT_PCI_DEV, edev->config_addr);
+	}
+
 	if (ret) {
 		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
 			__func__, edev->config_addr, ret);
@@ -1644,6 +1715,24 @@ void pcibios_bus_add_device(struct pci_dev *pdev)
 	eeh_sysfs_add_device(pdev);
 }
 
+#ifdef CONFIG_PCI_IOV
+static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev)
+{
+	struct pci_dn *pdn = pci_get_pdn(pdev);
+	int parent_mps;
+
+	if (!pdev->is_virtfn)
+		return;
+
+	/* Synchronize MPS for VF and PF */
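+	/*
+	 * pcie_mpss is the encoded maximum payload size the VF supports
+	 * (128 << pcie_mpss bytes); adopt the PF's MPS only if the VF can
+	 * handle it, and record the result in pdn->mps so it can be
+	 * restored by pnv_eeh_restore_vf_config() after reset.
+	 */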
+	parent_mps = pcie_get_mps(pdev->physfn);
+	if ((128 << pdev->pcie_mpss) >= parent_mps)
+		pcie_set_mps(pdev, parent_mps);
+	pdn->mps = pcie_get_mps(pdev);
+}
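+/*
+ * Header fixups run for every device as it is enumerated; the
+ * is_virtfn check above restricts this one to VFs.
+ */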
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
+#endif /* CONFIG_PCI_IOV */
+
 /**
  * eeh_powernv_init - Register platform dependent EEH operations
  *