|
@@ -63,6 +63,12 @@
|
|
|
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
|
|
|
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
|
|
|
|
|
|
+/* Reserved IOVA ranges */
|
|
|
+#define MSI_RANGE_START (0xfee00000)
|
|
|
+#define MSI_RANGE_END (0xfeefffff)
|
|
|
+#define HT_RANGE_START (0xfd00000000ULL)
|
|
|
+#define HT_RANGE_END (0xffffffffffULL)
|
|
|
+
|
|
|
/*
|
|
|
* This bitmap is used to advertise the page sizes our hardware support
|
|
|
* to the IOMMU core, which will then use this information to split
|
|
@@ -169,6 +175,9 @@ struct dma_ops_domain {
|
|
|
struct iova_domain iovad;
|
|
|
};
|
|
|
|
|
|
+static struct iova_domain reserved_iova_ranges;
|
|
|
+static struct lock_class_key reserved_rbtree_key;
|
|
|
+
|
|
|
/****************************************************************************
|
|
|
*
|
|
|
* Helper functions
|
|
@@ -2058,6 +2067,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
|
|
|
init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
|
|
|
IOVA_START_PFN, DMA_32BIT_PFN);
|
|
|
|
|
|
+ /* Initialize reserved ranges */
|
|
|
+ copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
|
|
|
+
|
|
|
return dma_dom;
|
|
|
|
|
|
free_dma_dom:
|
|
@@ -2963,6 +2975,59 @@ static struct dma_map_ops amd_iommu_dma_ops = {
|
|
|
.set_dma_mask = set_dma_mask,
|
|
|
};
|
|
|
|
|
|
+/*
+ * Reserve IOVA ranges that must never be handed out by the DMA-API
+ * allocator: the MSI window, the HyperTransport window and all PCI
+ * MMIO resources. The resulting tree is copied into every dma_ops
+ * domain when the domain is allocated.
+ *
+ * Returns 0 on success, -ENOMEM if any reservation fails.
+ */
+static int init_reserved_iova_ranges(void)
+{
+	struct pci_dev *pdev = NULL;
+	struct iova *val;
+
+	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
+			 IOVA_START_PFN, DMA_32BIT_PFN);
+
+	/*
+	 * Give this iova_domain its own lockdep class: it is only ever
+	 * populated here, never nested with per-domain iovad locks.
+	 */
+	lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
+			  &reserved_rbtree_key);
+
+	/* MSI memory range */
+	val = reserve_iova(&reserved_iova_ranges,
+			   IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
+	if (!val) {
+		pr_err("Reserving MSI range failed\n");
+		return -ENOMEM;
+	}
+
+	/* HT memory range */
+	val = reserve_iova(&reserved_iova_ranges,
+			   IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
+	if (!val) {
+		pr_err("Reserving HT range failed\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Memory used for PCI resources
+	 * FIXME: Check whether we can reserve the PCI-hole completely
+	 */
+	for_each_pci_dev(pdev) {
+		int i;
+
+		for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
+			struct resource *r = &pdev->resource[i];
+
+			if (!(r->flags & IORESOURCE_MEM))
+				continue;
+
+			val = reserve_iova(&reserved_iova_ranges,
+					   IOVA_PFN(r->start),
+					   IOVA_PFN(r->end));
+			if (!val) {
+				pr_err("Reserve pci-resource range failed\n");
+				/*
+				 * for_each_pci_dev() holds a reference on
+				 * pdev; drop it before the early return to
+				 * avoid leaking the device refcount.
+				 */
+				pci_dev_put(pdev);
+				return -ENOMEM;
+			}
+		}
+	}
+
+	return 0;
+}
|
|
|
+
|
|
|
int __init amd_iommu_init_api(void)
|
|
|
{
|
|
|
int ret, err = 0;
|
|
@@ -2971,6 +3036,10 @@ int __init amd_iommu_init_api(void)
|
|
|
if (ret)
|
|
|
return ret;
|
|
|
|
|
|
+ ret = init_reserved_iova_ranges();
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
+
|
|
|
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
|
|
|
if (err)
|
|
|
return err;
|