@@ -771,8 +771,12 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 			return -ENXIO;
 	}
 
-	/* Configure PELTV */
-	pnv_ioda_set_peltv(phb, pe, true);
+	/*
+	 * Configure PELTV. NPUs don't have a PELTV table so skip
+	 * configuration on them.
+	 */
+	if (phb->type != PNV_PHB_NPU)
+		pnv_ioda_set_peltv(phb, pe, true);
 
 	/* Setup reverse map */
 	for (rid = pe->rid; rid < rid_end; rid++)
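The PELTV is the table the PHB consults to decide which dependent PEs get frozen alongside a PE that takes an EEH error. The emulated NPU PHB implements no such table, so the write is skipped outright rather than left to fail in the OPAL call underneath pnv_ioda_set_peltv().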
@@ -915,7 +919,6 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
 }
 #endif /* CONFIG_PCI_IOV */
 
-#if 0
 static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 {
 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -932,11 +935,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 	if (pdn->pe_number != IODA_INVALID_PE)
 		return NULL;
 
-	/* PE#0 has been pre-set */
-	if (dev->bus->number == 0)
-		pe_num = 0;
-	else
-		pe_num = pnv_ioda_alloc_pe(phb);
+	pe_num = pnv_ioda_alloc_pe(phb);
 	if (pe_num == IODA_INVALID_PE) {
 		pr_warning("%s: Not enough PE# available, disabling device\n",
 			   pci_name(dev));
@@ -954,6 +953,7 @@
 	pci_dev_get(dev);
 	pdn->pcidev = dev;
 	pdn->pe_number = pe_num;
+	pe->flags = PNV_IODA_PE_DEV;
 	pe->pdev = dev;
 	pe->pbus = NULL;
 	pe->tce32_seg = -1;
@@ -984,7 +984,6 @@
 
 	return pe;
 }
-#endif /* Useful for SRIOV case */
 
 static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
 {
@@ -1075,6 +1074,18 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
 	pnv_ioda_link_pe_by_weight(phb, pe);
 }
 
+static void pnv_ioda_setup_dev_PEs(struct pci_bus *bus)
+{
+	struct pci_bus *child;
+	struct pci_dev *pdev;
+
+	list_for_each_entry(pdev, &bus->devices, bus_list)
+		pnv_ioda_setup_dev_PE(pdev);
+
+	list_for_each_entry(child, &bus->children, node)
+		pnv_ioda_setup_dev_PEs(child);
+}
+
 static void pnv_ioda_setup_PEs(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
@@ -1111,7 +1122,15 @@ static void pnv_pci_ioda_setup_PEs(void)
 		if (phb->reserve_m64_pe)
 			phb->reserve_m64_pe(hose->bus, NULL, true);
 
-		pnv_ioda_setup_PEs(hose->bus);
+		/*
+		 * On NPU PHB, we expect separate PEs for individual PCI
+		 * functions. PCI bus dependent PEs are required for the
+		 * remaining types of PHBs.
+		 */
+		if (phb->type == PNV_PHB_NPU)
+			pnv_ioda_setup_dev_PEs(hose->bus);
+		else
+			pnv_ioda_setup_PEs(hose->bus);
 	}
 }
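Each NVLink is presented as its own PCI function on the emulated NPU PHB, so PEs are assigned per device rather than per bus: pnv_ioda_setup_dev_PEs() recurses down the bus tree calling pnv_ioda_setup_dev_PE() for every function. That is also why the helper is resurrected from the #if 0 block above, and why it now marks its PEs with PNV_IODA_PE_DEV.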
@@ -1570,6 +1589,8 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
 	struct pnv_ioda_pe *pe;
 	uint64_t top;
 	bool bypass = false;
+	struct pci_dev *linked_npu_dev;
+	int i;
 
 	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
 		return -ENODEV;;
@@ -1588,6 +1609,15 @@
 		set_dma_ops(&pdev->dev, &dma_iommu_ops);
 	}
 	*pdev->dev.dma_mask = dma_mask;
+
+	/* Update peer npu devices */
+	if (pe->flags & PNV_IODA_PE_PEER)
+		for (i = 0; pe->peers[i]; i++) {
+			linked_npu_dev = pe->peers[i]->pdev;
+			if (dma_get_mask(&linked_npu_dev->dev) != dma_mask)
+				dma_set_mask(&linked_npu_dev->dev, dma_mask);
+		}
+
 	return 0;
 }
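The PNV_IODA_PE_PEER flag, the peers[] array and the PNV_IODA_MAX_PEER_PES bound used in these hunks are not declared in this file; they presumably arrive via pci.h elsewhere in the series. A minimal sketch of what the code above relies on, with the flag value and array bound assumed:

	/* pci.h (sketch; only the names are taken from the hunks here) */
	#define PNV_IODA_PE_PEER	(1 << 6)	/* flag value assumed */
	#define PNV_IODA_MAX_PEER_PES	8		/* bound assumed */

	struct pnv_ioda_pe {
		/* ... existing fields ... */

		/* NPU PEs whose DMA state shadows this (GPU) PE */
		struct pnv_ioda_pe *peers[PNV_IODA_MAX_PEER_PES];
	};

Propagating the mask keeps a GPU and its NVLink functions in the same addressing mode, which matters because the NPU TVT is later pointed at the very same TCE table as the PHB3 one.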
@@ -1732,12 +1762,23 @@ static inline void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_ioda_pe *pe)
 	/* 01xb - invalidate TCEs that match the specified PE# */
 	unsigned long val = (0x4ull << 60) | (pe->pe_number & 0xFF);
 	struct pnv_phb *phb = pe->phb;
+	struct pnv_ioda_pe *npe;
+	int i;
 
 	if (!phb->ioda.tce_inval_reg)
 		return;
 
 	mb(); /* Ensure above stores are visible */
 	__raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
+
+	if (pe->flags & PNV_IODA_PE_PEER)
+		for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
+			npe = pe->peers[i];
+			if (!npe || npe->phb->type != PNV_PHB_NPU)
+				continue;
+
+			pnv_npu_tce_invalidate_entire(npe);
+		}
 }
 
 static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
@@ -1772,15 +1813,28 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
 	struct iommu_table_group_link *tgl;
 
 	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
+		struct pnv_ioda_pe *npe;
 		struct pnv_ioda_pe *pe = container_of(tgl->table_group,
 				struct pnv_ioda_pe, table_group);
 		__be64 __iomem *invalidate = rm ?
 			(__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
 			pe->phb->ioda.tce_inval_reg;
+		int i;
 
 		pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm,
 			invalidate, tbl->it_page_shift,
 			index, npages);
+
+		if (pe->flags & PNV_IODA_PE_PEER)
+			/* Invalidate PEs using the same TCE table */
+			for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
+				npe = pe->peers[i];
+				if (!npe || npe->phb->type != PNV_PHB_NPU)
+					continue;
+
+				pnv_npu_tce_invalidate(npe, tbl, index,
+					npages, rm);
+			}
 	}
 }
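The NPU shares the in-memory TCE table with the PHB3 but caches translations behind its own invalidation register, so every PHB3 TCE kill has to be replayed against each peer NPU PE. pnv_npu_tce_invalidate() and pnv_npu_tce_invalidate_entire() are expected from the companion npu-dma.c; a sketch of the entire-PE variant, assuming it mirrors the PHB3 sequence above (the kill-all value is an assumption):

	/* npu-dma.c (sketch): flush every cached TCE for an NPU PE */
	void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe)
	{
		struct pnv_phb *phb = npe->phb;

		if (WARN_ON(phb->type != PNV_PHB_NPU || !phb->ioda.tce_inval_reg))
			return;

		mb();	/* order TCE table updates before the kill */
		__raw_writeq(cpu_to_be64(0x1ull << 63 /* kill-all, assumed */),
			     phb->ioda.tce_inval_reg);
	}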
@@ -2428,10 +2482,17 @@ static void pnv_ioda_setup_dma(struct pnv_phb *phb)
 			pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
 				pe->dma_weight, segs);
 			pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
-		} else {
+		} else if (phb->type == PNV_PHB_IODA2) {
 			pe_info(pe, "Assign DMA32 space\n");
 			segs = 0;
 			pnv_pci_ioda2_setup_dma_pe(phb, pe);
+		} else if (phb->type == PNV_PHB_NPU) {
+			/*
+			 * We initialise the DMA space for an NPU PHB
+			 * after setup of the PHB is complete as we
+			 * point the NPU TVT to the same location
+			 * as the PHB3 TVT.
+			 */
 		}
 
 		remaining -= segs;
@@ -2873,6 +2934,11 @@ static void pnv_pci_ioda_setup_seg(void)
 
 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
 		phb = hose->private_data;
+
+		/* NPU PHB does not support IO or MMIO segmentation */
+		if (phb->type == PNV_PHB_NPU)
+			continue;
+
 		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
 			pnv_ioda_setup_pe_seg(hose, pe);
 		}
@@ -2912,6 +2978,27 @@ static void pnv_pci_ioda_create_dbgfs(void)
 #endif /* CONFIG_DEBUG_FS */
 }
 
+static void pnv_npu_ioda_fixup(void)
+{
+	bool enable_bypass;
+	struct pci_controller *hose, *tmp;
+	struct pnv_phb *phb;
+	struct pnv_ioda_pe *pe;
+
+	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+		phb = hose->private_data;
+		if (phb->type != PNV_PHB_NPU)
+			continue;
+
+		list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
+			enable_bypass = dma_get_mask(&pe->pdev->dev) ==
+				DMA_BIT_MASK(64);
+			pnv_npu_init_dma_pe(pe);
+			pnv_npu_dma_set_bypass(pe, enable_bypass);
+		}
+	}
+}
+
 static void pnv_pci_ioda_fixup(void)
 {
 	pnv_pci_ioda_setup_PEs();
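pnv_npu_ioda_fixup() deliberately runs at the very end of the boot-time fixup: pnv_npu_init_dma_pe() can only point an NPU TVT at the 32-bit TCE table once pnv_ioda_setup_dma() has created it for the paired GPU PE, and replaying the device's current DMA mask then picks between that window and full bypass. Both helpers are expected from the companion npu-dma.c.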
@@ -2924,6 +3011,9 @@ static void pnv_pci_ioda_fixup(void)
 	eeh_init();
 	eeh_addr_cache_build();
 #endif
+
+	/* Link NPU IODA tables to their PCI devices. */
+	pnv_npu_ioda_fixup();
 }
 
 /*
@@ -3038,6 +3128,19 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
 	.shutdown = pnv_pci_ioda_shutdown,
 };
 
+static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
+	.dma_dev_setup = pnv_pci_dma_dev_setup,
+#ifdef CONFIG_PCI_MSI
+	.setup_msi_irqs = pnv_setup_msi_irqs,
+	.teardown_msi_irqs = pnv_teardown_msi_irqs,
+#endif
+	.enable_device_hook = pnv_pci_enable_device_hook,
+	.window_alignment = pnv_pci_window_alignment,
+	.reset_secondary_bus = pnv_pci_reset_secondary_bus,
+	.dma_set_mask = pnv_npu_dma_set_mask,
+	.shutdown = pnv_pci_ioda_shutdown,
+};
+
 static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 					 u64 hub_id, int ioda_type)
 {
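Relative to pnv_pci_ioda_controller_ops, the NPU table keeps the generic hooks but routes dma_set_mask to pnv_npu_dma_set_mask() in npu-dma.c. A rough sketch of that hook, assuming it simply applies the same bypass selection pnv_npu_ioda_fixup() uses above:

	/* npu-dma.c (sketch): assumed shape of the dma_set_mask hook */
	int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
	{
		struct pci_controller *hose = pci_bus_to_host(npdev->bus);
		struct pnv_phb *phb = hose->private_data;
		struct pci_dn *pdn = pci_get_pdn(npdev);
		struct pnv_ioda_pe *pe;

		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return -ENODEV;

		pe = &phb->ioda.pe_array[pdn->pe_number];
		/* a 64-bit mask selects bypass, anything smaller the 32-bit window */
		pnv_npu_dma_set_bypass(pe, dma_mask == DMA_BIT_MASK(64));
		*npdev->dev.dma_mask = dma_mask;

		return 0;
	}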
@@ -3093,6 +3196,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 		phb->model = PNV_PHB_MODEL_P7IOC;
 	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
 		phb->model = PNV_PHB_MODEL_PHB3;
+	else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
+		phb->model = PNV_PHB_MODEL_NPU;
 	else
 		phb->model = PNV_PHB_MODEL_UNKNOWN;
 
@@ -3193,7 +3298,11 @@
 	 * the child P2P bridges) can form individual PE.
 	 */
 	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
-	hose->controller_ops = pnv_pci_ioda_controller_ops;
+
+	if (phb->type == PNV_PHB_NPU)
+		hose->controller_ops = pnv_npu_ioda_controller_ops;
+	else
+		hose->controller_ops = pnv_pci_ioda_controller_ops;
 
 #ifdef CONFIG_PCI_IOV
 	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
@@ -3228,6 +3337,11 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np)
 	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
 }
 
+void __init pnv_pci_init_npu_phb(struct device_node *np)
+{
+	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU);
+}
+
 void __init pnv_pci_init_ioda_hub(struct device_node *np)
 {
 	struct device_node *phbn;
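Nothing in this hunk wires up a caller for pnv_pci_init_npu_phb(); presumably the platform setup in pci.c walks the device tree for NPU PHB nodes, roughly as below (the compatible string is an assumption):

	/* pnv_pci_init() (sketch) */
	struct device_node *np;

	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);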