|
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
|
|
|
* Domain for untranslated devices - only allocated
|
|
|
* if iommu=pt passed on kernel cmd line.
|
|
|
*/
|
|
|
-static struct protection_domain *pt_domain;
|
|
|
-
|
|
|
static const struct iommu_ops amd_iommu_ops;
|
|
|
|
|
|
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
|
|
@@ -96,7 +94,7 @@ struct iommu_dev_data {
|
|
|
struct protection_domain *domain; /* Domain the device is bound to */
|
|
|
u16 devid; /* PCI Device ID */
|
|
|
bool iommu_v2; /* Device can make use of IOMMUv2 */
|
|
|
- bool passthrough; /* Default for device is pt_domain */
|
|
|
+ bool passthrough; /* Device is identity mapped */
|
|
|
struct {
|
|
|
bool enabled;
|
|
|
int qdep;
|
|
@@ -116,7 +114,6 @@ struct iommu_cmd {
|
|
|
struct kmem_cache *amd_iommu_irq_cache;
|
|
|
|
|
|
static void update_domain(struct protection_domain *domain);
|
|
|
-static int alloc_passthrough_domain(void);
|
|
|
static int protection_domain_init(struct protection_domain *domain);
|
|
|
|
|
|
/****************************************************************************
|
|
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
|
|
|
dev_data = get_dev_data(dev);
|
|
|
|
|
|
if (domain->flags & PD_IOMMUV2_MASK) {
|
|
|
- if (!dev_data->iommu_v2 || !dev_data->passthrough)
|
|
|
+ if (!dev_data->passthrough)
|
|
|
return -EINVAL;
|
|
|
|
|
|
- if (pdev_iommuv2_enable(pdev) != 0)
|
|
|
- return -EINVAL;
|
|
|
+ if (dev_data->iommu_v2) {
|
|
|
+ if (pdev_iommuv2_enable(pdev) != 0)
|
|
|
+ return -EINVAL;
|
|
|
|
|
|
- dev_data->ats.enabled = true;
|
|
|
- dev_data->ats.qdep = pci_ats_queue_depth(pdev);
|
|
|
- dev_data->pri_tlp = pci_pri_tlp_required(pdev);
|
|
|
+ dev_data->ats.enabled = true;
|
|
|
+ dev_data->ats.qdep = pci_ats_queue_depth(pdev);
|
|
|
+ dev_data->pri_tlp = pci_pri_tlp_required(pdev);
|
|
|
+ }
|
|
|
} else if (amd_iommu_iotlb_sup &&
|
|
|
pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
|
|
|
dev_data->ats.enabled = true;
|
|
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
|
|
|
do_detach(head);
|
|
|
|
|
|
spin_unlock_irqrestore(&domain->lock, flags);
|
|
|
-
|
|
|
- /*
|
|
|
- * If we run in passthrough mode the device must be assigned to the
|
|
|
- * passthrough domain if it is detached from any other domain.
|
|
|
- * Make sure we can deassign from the pt_domain itself.
|
|
|
- */
|
|
|
- if (dev_data->passthrough &&
|
|
|
- (dev_data->domain == NULL && domain != pt_domain))
|
|
|
- __attach_device(dev_data, pt_domain);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
|
|
|
__detach_device(dev_data);
|
|
|
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
|
|
|
|
|
|
- if (domain->flags & PD_IOMMUV2_MASK)
|
|
|
+ if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
|
|
|
pdev_iommuv2_disable(to_pci_dev(dev));
|
|
|
else if (dev_data->ats.enabled)
|
|
|
pci_disable_ats(to_pci_dev(dev));
|
|
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
|
|
|
|
|
|
BUG_ON(!dev_data);
|
|
|
|
|
|
- if (dev_data->iommu_v2)
|
|
|
+ if (iommu_pass_through || dev_data->iommu_v2)
|
|
|
iommu_request_dm_for_dev(dev);
|
|
|
|
|
|
/* Domains are initialized for this device - have a look what we ended up with */
|
|
|
domain = iommu_get_domain_for_dev(dev);
|
|
|
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
|
|
|
+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
|
|
|
dev_data->passthrough = true;
|
|
|
- dev->archdata.dma_ops = &nommu_dma_ops;
|
|
|
- } else {
|
|
|
+ else
|
|
|
dev->archdata.dma_ops = &amd_iommu_dma_ops;
|
|
|
- }
|
|
|
|
|
|
out:
|
|
|
iommu_completion_wait(iommu);
|
|
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
|
|
|
|
|
|
int __init amd_iommu_init_dma_ops(void)
|
|
|
{
|
|
|
+ swiotlb = iommu_pass_through ? 1 : 0;
|
|
|
iommu_detected = 1;
|
|
|
- swiotlb = 0;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * In case we don't initialize SWIOTLB (actually the common case
|
|
|
+ * when AMD IOMMU is enabled), make sure there are global
|
|
|
+ * dma_ops set as a fall-back for devices not handled by this
|
|
|
+ * driver (for example non-PCI devices).
|
|
|
+ */
|
|
|
+ if (!swiotlb)
|
|
|
+ dma_ops = &nommu_dma_ops;
|
|
|
|
|
|
amd_iommu_stats_init();
|
|
|
|
|
@@ -2947,21 +2944,6 @@ out_err:
|
|
|
return NULL;
|
|
|
}
|
|
|
|
|
|
-static int alloc_passthrough_domain(void)
|
|
|
-{
|
|
|
- if (pt_domain != NULL)
|
|
|
- return 0;
|
|
|
-
|
|
|
- /* allocate passthrough domain */
|
|
|
- pt_domain = protection_domain_alloc();
|
|
|
- if (!pt_domain)
|
|
|
- return -ENOMEM;
|
|
|
-
|
|
|
- pt_domain->mode = PAGE_MODE_NONE;
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
|
|
|
{
|
|
|
struct protection_domain *pdomain;
|
|
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
|
|
|
*
|
|
|
*****************************************************************************/
|
|
|
|
|
|
-int __init amd_iommu_init_passthrough(void)
|
|
|
-{
|
|
|
- struct iommu_dev_data *dev_data;
|
|
|
- struct pci_dev *dev = NULL;
|
|
|
- int ret;
|
|
|
-
|
|
|
- ret = alloc_passthrough_domain();
|
|
|
- if (ret)
|
|
|
- return ret;
|
|
|
-
|
|
|
- for_each_pci_dev(dev) {
|
|
|
- if (!check_device(&dev->dev))
|
|
|
- continue;
|
|
|
-
|
|
|
- dev_data = get_dev_data(&dev->dev);
|
|
|
- dev_data->passthrough = true;
|
|
|
-
|
|
|
- attach_device(&dev->dev, pt_domain);
|
|
|
- }
|
|
|
-
|
|
|
- amd_iommu_stats_init();
|
|
|
-
|
|
|
- pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
/* IOMMUv2 specific functions */
|
|
|
int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
|
|
|
{
|
|
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
|
|
|
struct amd_iommu *iommu;
|
|
|
int qdep;
|
|
|
|
|
|
- BUG_ON(!dev_data->ats.enabled);
|
|
|
+ /*
|
|
|
+ * There might be non-IOMMUv2 capable devices in an IOMMUv2
|
|
|
+ * domain.
|
|
|
+ */
|
|
|
+ if (!dev_data->ats.enabled)
|
|
|
+ continue;
|
|
|
|
|
|
qdep = dev_data->ats.qdep;
|
|
|
iommu = amd_iommu_rlookup_table[dev_data->devid];
|