@@ -23,10 +23,16 @@
 #include <asm/octeon/octeon.h>

 #ifdef CONFIG_PCI
+#include <linux/pci.h>
 #include <asm/octeon/pci-octeon.h>
 #include <asm/octeon/cvmx-npi-defs.h>
 #include <asm/octeon/cvmx-pci-defs.h>

+struct octeon_dma_map_ops {
+	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
 static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
 {
 	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
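The struct introduced above is deliberately minimal: unlike the old definition this patch removes further down, it no longer embeds a struct dma_map_ops, so each per-BAR-type instance can be declared const and placed in read-only data. A standalone sketch of the same shape, with hypothetical demo_* names standing in for the kernel types:

	#include <stdint.h>

	/* Two translation hooks and nothing else, so instances can be const. */
	struct demo_map_ops {
		uint64_t (*phys_to_dma)(uint64_t paddr);
		uint64_t (*dma_to_phys)(uint64_t daddr);
	};

	static uint64_t demo_identity(uint64_t addr) { return addr; }

	static const struct demo_map_ops demo_unity_ops = {
		.phys_to_dma = demo_identity,
		.dma_to_phys = demo_identity,
	};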
@@ -60,6 +66,11 @@ static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 }

+static const struct octeon_dma_map_ops octeon_gen1_ops = {
+	.phys_to_dma = octeon_gen1_phys_to_dma,
+	.dma_to_phys = octeon_gen1_dma_to_phys,
+};
+
 static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	return octeon_hole_phys_to_dma(paddr);
@@ -70,6 +81,11 @@ static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return octeon_hole_dma_to_phys(daddr);
 }

+static const struct octeon_dma_map_ops octeon_gen2_ops = {
+	.phys_to_dma = octeon_gen2_phys_to_dma,
+	.dma_to_phys = octeon_gen2_dma_to_phys,
+};
+
 static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
@@ -92,6 +108,11 @@ static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 }

+static const struct octeon_dma_map_ops octeon_big_ops = {
+	.phys_to_dma = octeon_big_phys_to_dma,
+	.dma_to_phys = octeon_big_dma_to_phys,
+};
+
 static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
 					   phys_addr_t paddr)
 {
@@ -120,6 +141,32 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
 	return daddr;
 }

+static const struct octeon_dma_map_ops octeon_small_ops = {
+	.phys_to_dma = octeon_small_phys_to_dma,
+	.dma_to_phys = octeon_small_dma_to_phys,
+};
+
+static const struct octeon_dma_map_ops *octeon_pci_dma_ops;
+
+void __init octeon_pci_dma_init(void)
+{
+	switch (octeon_dma_bar_type) {
+	case OCTEON_DMA_BAR_TYPE_PCIE:
+		octeon_pci_dma_ops = &octeon_gen1_ops;
+		break;
+	case OCTEON_DMA_BAR_TYPE_PCIE2:
+		octeon_pci_dma_ops = &octeon_gen2_ops;
+		break;
+	case OCTEON_DMA_BAR_TYPE_BIG:
+		octeon_pci_dma_ops = &octeon_big_ops;
+		break;
+	case OCTEON_DMA_BAR_TYPE_SMALL:
+		octeon_pci_dma_ops = &octeon_small_ops;
+		break;
+	default:
+		BUG();
+	}
+}
 #endif /* CONFIG_PCI */

 static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
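After this hunk, octeon_pci_dma_init() only selects one of four const tables; the old version (removed at the end of the patch) instead wrote function pointers into the writable _octeon_pci_dma_map_ops at runtime. A standalone model of the new dispatch, with hypothetical demo_* names; the kernel uses BUG() where this sketch aborts:

	#include <stdint.h>
	#include <stdlib.h>

	struct demo_map_ops {
		uint64_t (*phys_to_dma)(uint64_t);
		uint64_t (*dma_to_phys)(uint64_t);
	};

	static uint64_t demo_id(uint64_t a) { return a; }

	static const struct demo_map_ops demo_gen1_ops = { demo_id, demo_id };
	static const struct demo_map_ops demo_gen2_ops = { demo_id, demo_id };

	/* Chosen once at init, read on every translation. */
	static const struct demo_map_ops *demo_ops;

	enum demo_bar_type { DEMO_BAR_PCIE, DEMO_BAR_PCIE2 };

	static void demo_pci_dma_init(enum demo_bar_type type)
	{
		switch (type) {
		case DEMO_BAR_PCIE:
			demo_ops = &demo_gen1_ops;
			break;
		case DEMO_BAR_PCIE2:
			demo_ops = &demo_gen2_ops;
			break;
		default:
			abort();	/* the kernel uses BUG() here */
		}
	}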
@@ -165,57 +212,37 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 }

-static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	return paddr;
-}
-
-static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
-	return daddr;
-}
-
-struct octeon_dma_map_ops {
-	const struct dma_map_ops dma_map_ops;
-	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
-	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
-};
-
 dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
-						      struct octeon_dma_map_ops,
-						      dma_map_ops);
-
-	return ops->phys_to_dma(dev, paddr);
+#ifdef CONFIG_PCI
+	if (dev && dev_is_pci(dev))
+		return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
+#endif
+	return paddr;
 }

 phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
-						      struct octeon_dma_map_ops,
-						      dma_map_ops);
-
-	return ops->dma_to_phys(dev, daddr);
+#ifdef CONFIG_PCI
+	if (dev && dev_is_pci(dev))
+		return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
+#endif
+	return daddr;
 }

-static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
-	.dma_map_ops = {
-		.alloc = octeon_dma_alloc_coherent,
-		.free = swiotlb_free,
-		.map_page = octeon_dma_map_page,
-		.unmap_page = swiotlb_unmap_page,
-		.map_sg = octeon_dma_map_sg,
-		.unmap_sg = swiotlb_unmap_sg_attrs,
-		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-		.sync_single_for_device = octeon_dma_sync_single_for_device,
-		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
-		.mapping_error = swiotlb_dma_mapping_error,
-		.dma_supported = swiotlb_dma_supported
-	},
-	.phys_to_dma = octeon_unity_phys_to_dma,
-	.dma_to_phys = octeon_unity_dma_to_phys
+static const struct dma_map_ops octeon_swiotlb_ops = {
+	.alloc = octeon_dma_alloc_coherent,
+	.free = swiotlb_free,
+	.map_page = octeon_dma_map_page,
+	.unmap_page = swiotlb_unmap_page,
+	.map_sg = octeon_dma_map_sg,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = octeon_dma_sync_single_for_device,
+	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = octeon_dma_sync_sg_for_device,
+	.mapping_error = swiotlb_dma_mapping_error,
+	.dma_supported = swiotlb_dma_supported
 };

 char *octeon_swiotlb;
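With the translation tables reachable through octeon_pci_dma_ops, __phys_to_dma() and __dma_to_phys() can branch on dev_is_pci() (the <linux/pci.h> helper that tests whether a device sits on the PCI bus, which is what the new include at the top of the patch is for) and fall back to an identity mapping. That fallback is exactly what the removed octeon_unity_phys_to_dma()/octeon_unity_dma_to_phys() pair used to provide through the container_of() indirection. A standalone model of the control flow, with hypothetical demo_* names and an arbitrary offset standing in for a real BAR translation:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	struct demo_dev {
		bool is_pci;	/* models dev_is_pci(dev) */
	};

	/* Arbitrary stand-in for a per-BAR-type translation such as the BAR1 shift. */
	static uint64_t demo_pci_phys_to_dma(uint64_t paddr)
	{
		return paddr + 0x400000000ull;
	}

	static uint64_t demo_phys_to_dma(const struct demo_dev *dev, uint64_t paddr)
	{
		if (dev && dev->is_pci)
			return demo_pci_phys_to_dma(paddr);
		return paddr;	/* identity, as octeon_unity_phys_to_dma() did */
	}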
@@ -281,51 +308,5 @@ void __init plat_swiotlb_setup(void)
 	if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
 		panic("Cannot allocate SWIOTLB buffer");

-	mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
+	mips_dma_map_ops = &octeon_swiotlb_ops;
 }
-
-#ifdef CONFIG_PCI
-static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
-	.dma_map_ops = {
-		.alloc = octeon_dma_alloc_coherent,
-		.free = swiotlb_free,
-		.map_page = octeon_dma_map_page,
-		.unmap_page = swiotlb_unmap_page,
-		.map_sg = octeon_dma_map_sg,
-		.unmap_sg = swiotlb_unmap_sg_attrs,
-		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-		.sync_single_for_device = octeon_dma_sync_single_for_device,
-		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
-		.mapping_error = swiotlb_dma_mapping_error,
-		.dma_supported = swiotlb_dma_supported
-	},
-};
-
-const struct dma_map_ops *octeon_pci_dma_map_ops;
-
-void __init octeon_pci_dma_init(void)
-{
-	switch (octeon_dma_bar_type) {
-	case OCTEON_DMA_BAR_TYPE_PCIE2:
-		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
-		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
-		break;
-	case OCTEON_DMA_BAR_TYPE_PCIE:
-		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
-		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
-		break;
-	case OCTEON_DMA_BAR_TYPE_BIG:
-		_octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
-		_octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
-		break;
-	case OCTEON_DMA_BAR_TYPE_SMALL:
-		_octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
-		_octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
-		break;
-	default:
-		BUG();
-	}
-	octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
-}
-#endif /* CONFIG_PCI */
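Net effect: two writable ops structures (octeon_linear_dma_map_ops and _octeon_pci_dma_map_ops), each duplicating the full swiotlb-backed dma_map_ops table, collapse into one const dma_map_ops plus four const translation tables, with the BAR-type decision made once in octeon_pci_dma_init() and consumed per call in __phys_to_dma()/__dma_to_phys(). A compilable end-to-end model of that final shape (all demo_* names hypothetical):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct demo_map_ops {
		uint64_t (*phys_to_dma)(uint64_t);
		uint64_t (*dma_to_phys)(uint64_t);
	};

	static uint64_t demo_up(uint64_t a)   { return a + 0x10000000ull; }
	static uint64_t demo_down(uint64_t a) { return a - 0x10000000ull; }

	static const struct demo_map_ops demo_pci_ops = { demo_up, demo_down };

	/* NULL until "boot" selects a table; non-PCI callers never touch it. */
	static const struct demo_map_ops *demo_ops;

	static uint64_t demo_phys_to_dma(int is_pci, uint64_t paddr)
	{
		return is_pci ? demo_ops->phys_to_dma(paddr) : paddr;
	}

	int main(void)
	{
		demo_ops = &demo_pci_ops;	/* octeon_pci_dma_init() analogue */
		assert(demo_phys_to_dma(0, 0x1000) == 0x1000);		/* identity */
		assert(demo_phys_to_dma(1, 0x1000) == 0x10001000ull);	/* translated */
		return 0;
	}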