@@ -485,37 +485,14 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int intel_iommu_ecs = 1;
-static int intel_iommu_pasid28;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL 1
 #define IDENTMAP_GFX 2
 #define IDENTMAP_AZALIA 4
 
-/* Broadwell and Skylake have broken ECS support — normal so-called "second
- * level" translation of DMA requests-without-PASID doesn't actually happen
- * unless you also set the NESTE bit in an extended context-entry. Which of
- * course means that SVM doesn't work because it's trying to do nested
- * translation of the physical addresses it finds in the process page tables,
- * through the IOVA->phys mapping found in the "second level" page tables.
- *
- * The VT-d specification was retroactively changed to change the definition
- * of the capability bits and pretend that Broadwell/Skylake never happened...
- * but unfortunately the wrong bit was changed. It's ECS which is broken, but
- * for some reason it was the PASID capability bit which was redefined (from
- * bit 28 on BDW/SKL to bit 40 in future).
- *
- * So our test for ECS needs to eschew those implementations which set the old
- * PASID capabiity bit 28, since those are the ones on which ECS is broken.
- * Unless we are working around the 'pasid28' limitations, that is, by putting
- * the device into passthrough mode for normal DMA and thus masking the bug.
- */
-#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
-			    (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
-/* PASID support is thus enabled if ECS is enabled and *either* of the old
- * or new capability bits are set. */
-#define pasid_enabled(iommu) (ecs_enabled(iommu) && \
-			      (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap))
+#define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
 
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
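With the pasid28 quirk dropped, ecs_enabled() reduces to the intel_iommu=ecs_off knob plus the ECS capability bit, and pasid_enabled() to ECS support plus the architectural PASID capability bit. Expanded, the condition that gates PASID/SVM setup elsewhere in this file now reads roughly as below (a sketch of the macro expansion only, not a new call site from this patch):

	/* pasid_enabled(iommu) after this patch, written out long-hand */
	if (intel_iommu_ecs && ecap_ecs(iommu->ecap) && ecap_pasid(iommu->ecap))
		/* PASID tables may be allocated and SVM set up */;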
@@ -578,11 +555,6 @@ static int __init intel_iommu_setup(char *str)
 			printk(KERN_INFO
 				"Intel-IOMMU: disable extended context table support\n");
 			intel_iommu_ecs = 0;
-		} else if (!strncmp(str, "pasid28", 7)) {
-			printk(KERN_INFO
-				"Intel-IOMMU: enable pre-production PASID support\n");
-			intel_iommu_pasid28 = 1;
-			iommu_identity_mapping |= IDENTMAP_GFX;
 		} else if (!strncmp(str, "tboot_noforce", 13)) {
 			printk(KERN_INFO
 				"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -1606,6 +1578,18 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 		iommu_flush_dev_iotlb(domain, addr, mask);
 }
 
+/* Notification for newly created mappings */
+static inline void __mapping_notify_one(struct intel_iommu *iommu,
+					struct dmar_domain *domain,
+					unsigned long pfn, unsigned int pages)
+{
+	/* It's a non-present to present mapping. Only flush if caching mode */
+	if (cap_caching_mode(iommu->cap))
+		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
+	else
+		iommu_flush_write_buffer(iommu);
+}
+
 static void iommu_flush_iova(struct iova_domain *iovad)
 {
 	struct dmar_domain *domain;
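This helper folds together the flush decision that was previously open-coded at each mapping call site: when the IOMMU reports caching mode (as emulated IOMMUs typically do), a non-present to present transition still needs a page-selective IOTLB flush; otherwise draining the write buffer is enough. The pattern removed later in this patch from __intel_map_single() and intel_map_sg() is equivalent to one helper call:

	/* before: repeated at every call site */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	/* after: centralised */
	__mapping_notify_one(iommu, domain, pfn, pages);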
@@ -2340,18 +2324,47 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	return 0;
 }
 
+static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+			  struct scatterlist *sg, unsigned long phys_pfn,
+			  unsigned long nr_pages, int prot)
+{
+	int ret;
+	struct intel_iommu *iommu;
+
+	/* Do the real mapping first */
+	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+	if (ret)
+		return ret;
+
+	/* Notify about the new mapping */
+	if (domain_type_is_vm(domain)) {
+		/* VM typed domains can have more than one IOMMU */
+		int iommu_id;
+		for_each_domain_iommu(iommu_id, domain) {
+			iommu = g_iommus[iommu_id];
+			__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+		}
+	} else {
+		/* General domains only have one IOMMU */
+		iommu = domain_get_iommu(domain);
+		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+	}
+
+	return 0;
+}
+
 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 				    struct scatterlist *sg, unsigned long nr_pages,
 				    int prot)
 {
-	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+	return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
 }
 
 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 				     unsigned long phys_pfn, unsigned long nr_pages,
 				     int prot)
 {
-	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+	return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
 }
 
 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
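domain_sg_mapping() and domain_pfn_mapping() now route through domain_mapping(), so every user of these wrappers, including the __intel_map_single() and intel_map_sg() paths whose open-coded flushes are removed below, gets the map-then-notify sequence, and a VM domain backed by several IOMMUs is flushed on each of them. A sketch of the resulting call flow:

	/* call flow after this patch (sketch, not literal code) */
	domain_pfn_mapping() / domain_sg_mapping()
	    -> domain_mapping()
	        -> __domain_mapping()       /* install the PTEs */
	        -> __mapping_notify_one()   /* per-IOMMU PSI flush or write-buffer drain */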
@@ -2534,7 +2547,7 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 	struct device_domain_info *info = NULL;
 	struct dmar_domain *domain = NULL;
 	struct intel_iommu *iommu;
-	u16 req_id, dma_alias;
+	u16 dma_alias;
 	unsigned long flags;
 	u8 bus, devfn;
 
@@ -2542,8 +2555,6 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 	if (!iommu)
 		return NULL;
 
-	req_id = ((u16)bus << 8) | devfn;
-
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
 
@@ -2657,9 +2668,9 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
 	 */
 	dma_pte_clear_range(domain, first_vpfn, last_vpfn);
 
-	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
-				  last_vpfn - first_vpfn + 1,
-				  DMA_PTE_READ|DMA_PTE_WRITE);
+	return __domain_mapping(domain, first_vpfn, NULL,
+				first_vpfn, last_vpfn - first_vpfn + 1,
+				DMA_PTE_READ|DMA_PTE_WRITE);
 }
 
 static int domain_prepare_identity_map(struct device *dev,
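Note the direction of this hunk: iommu_domain_identity_map() stops going through domain_pfn_mapping() and calls __domain_mapping() directly, so the identity-map path keeps its old behaviour of installing PTEs without a flush or notification. Before this patch domain_pfn_mapping() did not flush either; it is only the wrapper that now notifies, roughly:

	/* identity-map path, before and after (sketch) */
	/* before: */ domain_pfn_mapping() -> __domain_mapping();   /* no flush */
	/* after:  */ __domain_mapping();                            /* still no flush */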
@@ -3626,14 +3637,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 	if (ret)
 		goto error;
 
-	/* it's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain,
-				      mm_to_dma_pfn(iova_pfn),
-				      size, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
-
 	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
 	start_paddr += paddr & ~PAGE_MASK;
 	return start_paddr;
@@ -3820,12 +3823,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 		return 0;
 	}
 
-	/* it's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
-
 	return nelems;
 }
 