|
@@ -30,8 +30,19 @@
|
|
|
#define DRIVER_NAME "pci_sun4v"
|
|
|
#define PFX DRIVER_NAME ": "
|
|
|
|
|
|
-static unsigned long vpci_major = 1;
|
|
|
-static unsigned long vpci_minor = 1;
|
|
|
+static unsigned long vpci_major;
|
|
|
+static unsigned long vpci_minor;
|
|
|
+
|
|
|
+struct vpci_version {
|
|
|
+ unsigned long major;
|
|
|
+ unsigned long minor;
|
|
|
+};
|
|
|
+
|
|
|
+/* Ordered from highest (newest) major version to lowest */
|
|
|
+static struct vpci_version vpci_versions[] = {
|
|
|
+ { .major = 2, .minor = 0 },
|
|
|
+ { .major = 1, .minor = 1 },
|
|
|
+};
|
|
|
|
|
|
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
|
|
|
|
|
@@ -67,6 +78,10 @@ static long iommu_batch_flush(struct iommu_batch *p)
|
|
|
u64 *pglist = p->pglist;
|
|
|
unsigned long npages = p->npages;
|
|
|
|
|
|
+ /* VPCI maj=1, min=[0,1] only supports read and write */
|
|
|
+ if (vpci_major < 2)
|
|
|
+ prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
|
|
|
+
|
|
|
while (npages != 0) {
|
|
|
long num;
|
|
|
|
|
@@ -133,6 +148,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
|
|
|
unsigned long attrs)
|
|
|
{
|
|
|
unsigned long flags, order, first_page, npages, n;
|
|
|
+ unsigned long prot = 0;
|
|
|
struct iommu *iommu;
|
|
|
struct page *page;
|
|
|
void *ret;
|
|
@@ -146,6 +162,9 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
|
|
|
|
|
|
npages = size >> IO_PAGE_SHIFT;
|
|
|
|
|
|
+ if (attrs & DMA_ATTR_WEAK_ORDERING)
|
|
|
+ prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;
|
|
|
+
|
|
|
nid = dev->archdata.numa_node;
|
|
|
page = alloc_pages_node(nid, gfp, order);
|
|
|
if (unlikely(!page))
|
|
@@ -169,7 +188,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
|
|
|
local_irq_save(flags);
|
|
|
|
|
|
iommu_batch_start(dev,
|
|
|
- (HV_PCI_MAP_ATTR_READ |
|
|
|
+ (HV_PCI_MAP_ATTR_READ | prot |
|
|
|
HV_PCI_MAP_ATTR_WRITE),
|
|
|
entry);
|
|
|
|
|
@@ -266,6 +285,9 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
|
|
|
if (direction != DMA_TO_DEVICE)
|
|
|
prot |= HV_PCI_MAP_ATTR_WRITE;
|
|
|
|
|
|
+ if (attrs & DMA_ATTR_WEAK_ORDERING)
|
|
|
+ prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
|
|
|
+
|
|
|
local_irq_save(flags);
|
|
|
|
|
|
iommu_batch_start(dev, prot, entry);
|
|
@@ -344,6 +366,9 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
|
|
|
if (direction != DMA_TO_DEVICE)
|
|
|
prot |= HV_PCI_MAP_ATTR_WRITE;
|
|
|
|
|
|
+ if (attrs & DMA_ATTR_WEAK_ORDERING)
|
|
|
+ prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
|
|
|
+
|
|
|
outs = s = segstart = &sglist[0];
|
|
|
outcount = 1;
|
|
|
incount = nelems;
|
|
@@ -907,22 +932,27 @@ static int pci_sun4v_probe(struct platform_device *op)
|
|
|
struct device_node *dp;
|
|
|
struct iommu *iommu;
|
|
|
u32 devhandle;
|
|
|
- int i, err;
|
|
|
+ int i, err = -ENODEV;
|
|
|
|
|
|
dp = op->dev.of_node;
|
|
|
|
|
|
if (!hvapi_negotiated++) {
|
|
|
- err = sun4v_hvapi_register(HV_GRP_PCI,
|
|
|
- vpci_major,
|
|
|
- &vpci_minor);
|
|
|
+ for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
|
|
|
+ vpci_major = vpci_versions[i].major;
|
|
|
+ vpci_minor = vpci_versions[i].minor;
|
|
|
+
|
|
|
+ err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
|
|
|
+ &vpci_minor);
|
|
|
+ if (!err)
|
|
|
+ break;
|
|
|
+ }
|
|
|
|
|
|
if (err) {
|
|
|
- printk(KERN_ERR PFX "Could not register hvapi, "
|
|
|
- "err=%d\n", err);
|
|
|
+ pr_err(PFX "Could not register hvapi, err=%d\n", err);
|
|
|
return err;
|
|
|
}
|
|
|
- printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
|
|
|
- vpci_major, vpci_minor);
|
|
|
+ pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
|
|
|
+ vpci_major, vpci_minor);
|
|
|
|
|
|
dma_ops = &sun4v_dma_ops;
|
|
|
}
|