@@ -28,6 +28,7 @@

#define pr_fmt(fmt) "arm-smmu: " fmt

+#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
@@ -40,6 +41,8 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -49,15 +52,9 @@

#include "io-pgtable.h"

-/* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS 128
-
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128

-/* Maximum number of mapping groups per SMMU */
-#define ARM_SMMU_MAX_SMRS 128
-
/* SMMU global address space */
#define ARM_SMMU_GR0(smmu) ((smmu)->base)
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
@@ -165,21 +162,27 @@
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
#define SMR_VALID (1 << 31)
#define SMR_MASK_SHIFT 16
-#define SMR_MASK_MASK 0x7fff
#define SMR_ID_SHIFT 0
-#define SMR_ID_MASK 0x7fff

#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
#define S2CR_CBNDX_MASK 0xff
#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
-#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
-#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
-#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
+enum arm_smmu_s2cr_type {
+	S2CR_TYPE_TRANS,
+	S2CR_TYPE_BYPASS,
+	S2CR_TYPE_FAULT,
+};

#define S2CR_PRIVCFG_SHIFT 24
-#define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT)
+#define S2CR_PRIVCFG_MASK 0x3
+enum arm_smmu_s2cr_privcfg {
+	S2CR_PRIVCFG_DEFAULT,
+	S2CR_PRIVCFG_DIPAN,
+	S2CR_PRIVCFG_UNPRIV,
+	S2CR_PRIVCFG_PRIV,
+};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
@@ -217,6 +220,7 @@
#define ARM_SMMU_CB_TTBR0 0x20
#define ARM_SMMU_CB_TTBR1 0x28
#define ARM_SMMU_CB_TTBCR 0x30
+#define ARM_SMMU_CB_CONTEXTIDR 0x34
#define ARM_SMMU_CB_S1_MAIR0 0x38
#define ARM_SMMU_CB_S1_MAIR1 0x3c
#define ARM_SMMU_CB_PAR 0x50
@@ -239,7 +243,6 @@
#define SCTLR_AFE (1 << 2)
#define SCTLR_TRE (1 << 1)
#define SCTLR_M (1 << 0)
-#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE (1 << 1)

@@ -296,23 +299,33 @@ enum arm_smmu_implementation {
	CAVIUM_SMMUV2,
};

+struct arm_smmu_s2cr {
+	struct iommu_group *group;
+	int count;
+	enum arm_smmu_s2cr_type type;
+	enum arm_smmu_s2cr_privcfg privcfg;
+	u8 cbndx;
+};
+
+#define s2cr_init_val (struct arm_smmu_s2cr){ \
+	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
+}
+
struct arm_smmu_smr {
-	u8 idx;
	u16 mask;
	u16 id;
+	bool valid;
};

struct arm_smmu_master_cfg {
-	int num_streamids;
-	u16 streamids[MAX_MASTER_STREAMIDS];
-	struct arm_smmu_smr *smrs;
-};
-
-struct arm_smmu_master {
-	struct device_node *of_node;
-	struct rb_node node;
-	struct arm_smmu_master_cfg cfg;
+	struct arm_smmu_device *smmu;
+	s16 smendx[];
};
+#define INVALID_SMENDX -1
+#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
+#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
+#define for_each_cfg_sme(fw, i, idx) \
+	for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device *dev;
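
The for_each_cfg_sme() iterator above leans on the C comma operator: the
assignment "idx = ...smendx[i]" is evaluated before the "i < fw->num_ids"
test, so idx is already loaded whenever the loop body runs. A standalone
sketch of that behaviour follows; all names in it are invented for
illustration, and the array carries one spare slot because the load also
happens on the final, failing test:

#include <stdio.h>

/* 3 live entries plus 1 spare slot read by the terminating test */
struct fake_cfg { int smendx[4]; };
struct fake_fwspec { int num_ids; struct fake_cfg *cfg; };

#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = (fw)->cfg->smendx[i], i < (fw)->num_ids; ++i)

int main(void)
{
	struct fake_cfg cfg = { .smendx = { 5, 9, 2, -1 } };
	struct fake_fwspec fw = { .num_ids = 3, .cfg = &cfg };
	int i, idx;

	for_each_cfg_sme(&fw, i, idx)	/* visits SME indices 5, 9, 2 */
		printf("stream %d -> SME index %d\n", i, idx);
	return 0;
}
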
@@ -346,7 +359,11 @@ struct arm_smmu_device {
	atomic_t irptndx;

	u32 num_mapping_groups;
-	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+	u16 streamid_mask;
+	u16 smr_mask_mask;
+	struct arm_smmu_smr *smrs;
+	struct arm_smmu_s2cr *s2crs;
+	struct mutex stream_map_mutex;

	unsigned long va_size;
	unsigned long ipa_size;
@@ -357,9 +374,6 @@ struct arm_smmu_device {
	u32 num_context_irqs;
	unsigned int *irqs;

-	struct list_head list;
-	struct rb_root masters;
-
	u32 cavium_id_base; /* Specific to Cavium */
};

@@ -397,15 +411,6 @@ struct arm_smmu_domain {
	struct iommu_domain domain;
};

-struct arm_smmu_phandle_args {
-	struct device_node *np;
-	int args_count;
-	uint32_t args[MAX_MASTER_STREAMIDS];
-};
-
-static DEFINE_SPINLOCK(arm_smmu_devices_lock);
-static LIST_HEAD(arm_smmu_devices);
-
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
@@ -413,6 +418,8 @@ struct arm_smmu_option_prop {

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

+static bool using_legacy_binding, using_generic_binding;
+
static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
@@ -444,131 +451,86 @@ static struct device_node *dev_get_dev_node(struct device *dev)

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
-		return bus->bridge->parent->of_node;
+		return of_node_get(bus->bridge->parent->of_node);
	}

-	return dev->of_node;
+	return of_node_get(dev->of_node);
}

-static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
-						struct device_node *dev_node)
+static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
-	struct rb_node *node = smmu->masters.rb_node;
-
-	while (node) {
-		struct arm_smmu_master *master;
-
-		master = container_of(node, struct arm_smmu_master, node);
-
-		if (dev_node < master->of_node)
-			node = node->rb_left;
-		else if (dev_node > master->of_node)
-			node = node->rb_right;
-		else
-			return master;
-	}
-
-	return NULL;
+	*((__be32 *)data) = cpu_to_be32(alias);
+	return 0; /* Continue walking */
}

-static struct arm_smmu_master_cfg *
-find_smmu_master_cfg(struct device *dev)
+static int __find_legacy_master_phandle(struct device *dev, void *data)
{
-	struct arm_smmu_master_cfg *cfg = NULL;
-	struct iommu_group *group = iommu_group_get(dev);
-
-	if (group) {
-		cfg = iommu_group_get_iommudata(group);
-		iommu_group_put(group);
-	}
-
-	return cfg;
+	struct of_phandle_iterator *it = *(void **)data;
+	struct device_node *np = it->node;
+	int err;
+
+	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
+			    "#stream-id-cells", 0)
+		if (it->node == np) {
+			*(void **)data = dev;
+			return 1;
+		}
+	it->node = np;
+	return err == -ENOENT ? 0 : err;
}

-static int insert_smmu_master(struct arm_smmu_device *smmu,
-			      struct arm_smmu_master *master)
+static struct platform_driver arm_smmu_driver;
+static struct iommu_ops arm_smmu_ops;
+
+static int arm_smmu_register_legacy_master(struct device *dev,
+					   struct arm_smmu_device **smmu)
{
-	struct rb_node **new, *parent;
-
-	new = &smmu->masters.rb_node;
-	parent = NULL;
-	while (*new) {
-		struct arm_smmu_master *this
-			= container_of(*new, struct arm_smmu_master, node);
-
-		parent = *new;
-		if (master->of_node < this->of_node)
-			new = &((*new)->rb_left);
-		else if (master->of_node > this->of_node)
-			new = &((*new)->rb_right);
-		else
-			return -EEXIST;
+	struct device *smmu_dev;
+	struct device_node *np;
+	struct of_phandle_iterator it;
+	void *data = &it;
+	u32 *sids;
+	__be32 pci_sid;
+	int err;
+
+	np = dev_get_dev_node(dev);
+	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
+		of_node_put(np);
+		return -ENODEV;
	}

-	rb_link_node(&master->node, parent, new);
-	rb_insert_color(&master->node, &smmu->masters);
-	return 0;
-}
-
-static int register_smmu_master(struct arm_smmu_device *smmu,
-				struct device *dev,
-				struct arm_smmu_phandle_args *masterspec)
-{
-	int i;
-	struct arm_smmu_master *master;
+	it.node = np;
+	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
+				     __find_legacy_master_phandle);
+	smmu_dev = data;
+	of_node_put(np);
+	if (err == 0)
+		return -ENODEV;
+	if (err < 0)
+		return err;

-	master = find_smmu_master(smmu, masterspec->np);
-	if (master) {
-		dev_err(dev,
-			"rejecting multiple registrations for master device %s\n",
-			masterspec->np->name);
-		return -EBUSY;
+	if (dev_is_pci(dev)) {
+		/* "mmu-masters" assumes Stream ID == Requester ID */
+		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
+				       &pci_sid);
+		it.cur = &pci_sid;
+		it.cur_count = 1;
	}

-	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
-		dev_err(dev,
-			"reached maximum number (%d) of stream IDs for master device %s\n",
-			MAX_MASTER_STREAMIDS, masterspec->np->name);
-		return -ENOSPC;
-	}
+	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
+				&arm_smmu_ops);
+	if (err)
+		return err;

-	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
-	if (!master)
+	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
+	if (!sids)
		return -ENOMEM;

-	master->of_node = masterspec->np;
-	master->cfg.num_streamids = masterspec->args_count;
-
-	for (i = 0; i < master->cfg.num_streamids; ++i) {
-		u16 streamid = masterspec->args[i];
-
-		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
-		    (streamid >= smmu->num_mapping_groups)) {
-			dev_err(dev,
-				"stream ID for master device %s greater than maximum allowed (%d)\n",
-				masterspec->np->name, smmu->num_mapping_groups);
-			return -ERANGE;
-		}
-		master->cfg.streamids[i] = streamid;
-	}
-	return insert_smmu_master(smmu, master);
-}
-
-static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
-{
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_master *master = NULL;
-	struct device_node *dev_node = dev_get_dev_node(dev);
-
-	spin_lock(&arm_smmu_devices_lock);
-	list_for_each_entry(smmu, &arm_smmu_devices, list) {
-		master = find_smmu_master(smmu, dev_node);
-		if (master)
-			break;
-	}
-	spin_unlock(&arm_smmu_devices_lock);
-
-	return master ? smmu : NULL;
+	*smmu = dev_get_drvdata(smmu_dev);
+	of_phandle_iterator_args(&it, sids, it.cur_count);
+	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
+	kfree(sids);
+	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
@@ -738,7 +700,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
-	u32 reg;
+	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
@@ -781,14 +743,22 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,

	/* TTBRs */
	if (stage1) {
-		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-
-		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
-		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
-
-		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
-		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
-		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
+
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
+			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
+			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
+		} else {
+			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
+			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
+			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
@@ -796,28 +766,36 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,

	/* TTBCR */
	if (stage1) {
-		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
-		if (smmu->version > ARM_SMMU_V1) {
-			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-			reg |= TTBCR2_SEP_UPSTREAM;
-			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
+			reg2 = 0;
+		} else {
+			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
+		if (smmu->version > ARM_SMMU_V1)
+			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
-		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
+			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
+		} else {
+			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
-		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
+		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
-	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
@@ -841,12 +819,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
	if (smmu_domain->smmu)
		goto out_unlock;

-	/* We're bypassing these SIDs, so don't allocate an actual context */
-	if (domain->type == IOMMU_DOMAIN_DMA) {
-		smmu_domain->smmu = smmu;
-		goto out_unlock;
-	}
-
	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -880,6 +852,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
+	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
+	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
+	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
+	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
+		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
@@ -899,10 +876,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
-		} else {
+		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
+		} else {
+			fmt = ARM_V7S;
+			ias = min(ias, 32UL);
+			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
@@ -958,6 +939,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+	domain->geometry.aperture_end = (1UL << ias) - 1;
+	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
@@ -996,7 +979,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
	void __iomem *cb_base;
	int irq;

-	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
+	if (!smmu)
		return;

	/*
@@ -1030,8 +1013,8 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
	if (!smmu_domain)
		return NULL;

-	if (type == IOMMU_DOMAIN_DMA &&
-	    iommu_get_dma_cookie(&smmu_domain->domain)) {
+	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
+	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}
@@ -1055,162 +1038,197 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
	kfree(smmu_domain);
}

-static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
-					  struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
-	int i;
-	struct arm_smmu_smr *smrs;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	struct arm_smmu_smr *smr = smmu->smrs + idx;
+	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

-	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
-		return 0;
+	if (smr->valid)
+		reg |= SMR_VALID;
+	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
+}

-	if (cfg->smrs)
-		return -EEXIST;
+static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
+{
+	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
+	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
+		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
+		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

-	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
-	if (!smrs) {
-		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
-			cfg->num_streamids);
-		return -ENOMEM;
-	}
+	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
+}

-	/* Allocate the SMRs on the SMMU */
-	for (i = 0; i < cfg->num_streamids; ++i) {
-		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
-						  smmu->num_mapping_groups);
-		if (idx < 0) {
-			dev_err(smmu->dev, "failed to allocate free SMR\n");
-			goto err_free_smrs;
-		}
+static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
+{
+	arm_smmu_write_s2cr(smmu, idx);
+	if (smmu->smrs)
+		arm_smmu_write_smr(smmu, idx);
+}

-		smrs[i] = (struct arm_smmu_smr) {
-			.idx = idx,
-			.mask = 0, /* We don't currently share SMRs */
-			.id = cfg->streamids[i],
-		};
-	}
+static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
+{
+	struct arm_smmu_smr *smrs = smmu->smrs;
+	int i, free_idx = -ENOSPC;

-	/* It worked! Now, poke the actual hardware */
-	for (i = 0; i < cfg->num_streamids; ++i) {
-		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
-			  smrs[i].mask << SMR_MASK_SHIFT;
-		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
-	}
+	/* Stream indexing is blissfully easy */
+	if (!smrs)
+		return id;

-	cfg->smrs = smrs;
-	return 0;
+	/* Validating SMRs is... less so */
+	for (i = 0; i < smmu->num_mapping_groups; ++i) {
+		if (!smrs[i].valid) {
+			/*
+			 * Note the first free entry we come across, which
+			 * we'll claim in the end if nothing else matches.
+			 */
+			if (free_idx < 0)
+				free_idx = i;
+			continue;
+		}
+		/*
+		 * If the new entry is _entirely_ matched by an existing entry,
+		 * then reuse that, with the guarantee that there also cannot
+		 * be any subsequent conflicting entries. In normal use we'd
+		 * expect simply identical entries for this case, but there's
+		 * no harm in accommodating the generalisation.
+		 */
+		if ((mask & smrs[i].mask) == mask &&
+		    !((id ^ smrs[i].id) & ~smrs[i].mask))
+			return i;
+		/*
+		 * If the new entry has any other overlap with an existing one,
+		 * though, then there always exists at least one stream ID
+		 * which would cause a conflict, and we can't allow that risk.
+		 */
+		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
+			return -EINVAL;
+	}

-err_free_smrs:
-	while (--i >= 0)
-		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
-	kfree(smrs);
-	return -ENOSPC;
+	return free_idx;
}

-static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
-				      struct arm_smmu_master_cfg *cfg)
+static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
-	int i;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-	struct arm_smmu_smr *smrs = cfg->smrs;
-
-	if (!smrs)
-		return;
-
-	/* Invalidate the SMRs before freeing back to the allocator */
-	for (i = 0; i < cfg->num_streamids; ++i) {
-		u8 idx = smrs[i].idx;
+	if (--smmu->s2crs[idx].count)
+		return false;

-		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
-		__arm_smmu_free_bitmap(smmu->smr_map, idx);
-	}
+	smmu->s2crs[idx] = s2cr_init_val;
+	if (smmu->smrs)
+		smmu->smrs[idx].valid = false;

-	cfg->smrs = NULL;
-	kfree(smrs);
+	return true;
}

-static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
-				      struct arm_smmu_master_cfg *cfg)
+static int arm_smmu_master_alloc_smes(struct device *dev)
{
-	int i, ret;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+	struct arm_smmu_device *smmu = cfg->smmu;
+	struct arm_smmu_smr *smrs = smmu->smrs;
+	struct iommu_group *group;
+	int i, idx, ret;

-	/*
-	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
-	 * for all devices behind the SMMU. Note that we need to take
-	 * care configuring SMRs for devices both a platform_device and
-	 * and a PCI device (i.e. a PCI host controller)
-	 */
-	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
-		return 0;
+	mutex_lock(&smmu->stream_map_mutex);
+	/* Figure out a viable stream map entry allocation */
+	for_each_cfg_sme(fwspec, i, idx) {
+		u16 sid = fwspec->ids[i];
+		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

-	/* Devices in an IOMMU group may already be configured */
-	ret = arm_smmu_master_configure_smrs(smmu, cfg);
-	if (ret)
-		return ret == -EEXIST ? 0 : ret;
+		if (idx != INVALID_SMENDX) {
+			ret = -EEXIST;
+			goto out_err;
+		}

-	for (i = 0; i < cfg->num_streamids; ++i) {
-		u32 idx, s2cr;
+		ret = arm_smmu_find_sme(smmu, sid, mask);
+		if (ret < 0)
+			goto out_err;
+
+		idx = ret;
+		if (smrs && smmu->s2crs[idx].count == 0) {
+			smrs[idx].id = sid;
+			smrs[idx].mask = mask;
+			smrs[idx].valid = true;
+		}
+		smmu->s2crs[idx].count++;
+		cfg->smendx[i] = (s16)idx;
+	}

-		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
-		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
-		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
-		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+	group = iommu_group_get_for_dev(dev);
+	if (!group)
+		group = ERR_PTR(-ENOMEM);
+	if (IS_ERR(group)) {
+		ret = PTR_ERR(group);
+		goto out_err;
	}
+	iommu_group_put(group);

+	/* It worked! Now, poke the actual hardware */
+	for_each_cfg_sme(fwspec, i, idx) {
+		arm_smmu_write_sme(smmu, idx);
+		smmu->s2crs[idx].group = group;
+	}
+
+	mutex_unlock(&smmu->stream_map_mutex);
	return 0;
+
+out_err:
+	while (i--) {
+		arm_smmu_free_sme(smmu, cfg->smendx[i]);
+		cfg->smendx[i] = INVALID_SMENDX;
+	}
+	mutex_unlock(&smmu->stream_map_mutex);
+	return ret;
}

-static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
-					  struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
-	int i;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
-	/* An IOMMU group is torn down by the first device to be removed */
-	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
-		return;
+	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+	int i, idx;

-	/*
-	 * We *must* clear the S2CR first, because freeing the SMR means
-	 * that it can be re-allocated immediately.
-	 */
-	for (i = 0; i < cfg->num_streamids; ++i) {
-		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
-		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
-
-		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+	mutex_lock(&smmu->stream_map_mutex);
+	for_each_cfg_sme(fwspec, i, idx) {
+		if (arm_smmu_free_sme(smmu, idx))
+			arm_smmu_write_sme(smmu, idx);
+		cfg->smendx[i] = INVALID_SMENDX;
	}
-
-	arm_smmu_master_free_smrs(smmu, cfg);
+	mutex_unlock(&smmu->stream_map_mutex);
}

-static void arm_smmu_detach_dev(struct device *dev,
-				struct arm_smmu_master_cfg *cfg)
+static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+				      struct iommu_fwspec *fwspec)
{
-	struct iommu_domain *domain = dev->archdata.iommu;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
+	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
+	u8 cbndx = smmu_domain->cfg.cbndx;
+	int i, idx;
+
+	for_each_cfg_sme(fwspec, i, idx) {
+		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
+			continue;

-	dev->archdata.iommu = NULL;
-	arm_smmu_domain_remove_master(smmu_domain, cfg);
+		s2cr[idx].type = type;
+		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
+		s2cr[idx].cbndx = cbndx;
+		arm_smmu_write_s2cr(smmu, idx);
+	}
+	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
-	struct arm_smmu_master_cfg *cfg;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

-	smmu = find_smmu_for_device(dev);
-	if (!smmu) {
+	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

+	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
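
The arm_smmu_find_sme() logic above is pure mask arithmetic: an SMR with a
given id/mask pair matches every stream ID s for which ((s ^ id) & ~mask)
is zero. A standalone sketch of its two tests on made-up values (none of
this code is from the patch itself):

#include <stdbool.h>
#include <stdio.h>

/* Is the new (id, mask) entirely contained within an existing entry? */
static bool sme_subsumed(unsigned ex_id, unsigned ex_mask,
			 unsigned id, unsigned mask)
{
	return (mask & ex_mask) == mask && !((id ^ ex_id) & ~ex_mask);
}

/* Do the two entries match at least one stream ID in common? */
static bool sme_overlaps(unsigned ex_id, unsigned ex_mask,
			 unsigned id, unsigned mask)
{
	return !((id ^ ex_id) & ~(ex_mask | mask));
}

int main(void)
{
	/* Existing SMR 0x400/0xff covers stream IDs 0x400-0x4ff */
	printf("%d\n", sme_subsumed(0x400, 0xff, 0x420, 0x0f));	/* 1: reusable */
	printf("%d\n", sme_overlaps(0x400, 0xff, 0x4f0, 0x1ff));	/* 1: conflict */
	printf("%d\n", sme_overlaps(0x400, 0xff, 0x800, 0x0f));	/* 0: disjoint */
	return 0;
}
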
@@ -1228,18 +1246,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
	}

	/* Looks ok, so add the device to the domain */
-	cfg = find_smmu_master_cfg(dev);
-	if (!cfg)
-		return -ENODEV;
-
-	/* Detach the dev from its current domain */
-	if (dev->archdata.iommu)
-		arm_smmu_detach_dev(dev, cfg);
-
-	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
-	if (!ret)
-		dev->archdata.iommu = domain;
-	return ret;
+	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
@@ -1358,110 +1365,113 @@ static bool arm_smmu_capable(enum iommu_cap cap)
	}
}

-static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
+static int arm_smmu_match_node(struct device *dev, void *data)
{
-	*((u16 *)data) = alias;
-	return 0; /* Continue walking */
+	return dev->of_node == data;
}

-static void __arm_smmu_release_pci_iommudata(void *data)
+static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
{
-	kfree(data);
+	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
+						np, arm_smmu_match_node);
+	put_device(dev);
+	return dev ? dev_get_drvdata(dev) : NULL;
}

-static int arm_smmu_init_pci_device(struct pci_dev *pdev,
-				    struct iommu_group *group)
+static int arm_smmu_add_device(struct device *dev)
{
+	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
-	u16 sid;
-	int i;
-
-	cfg = iommu_group_get_iommudata(group);
-	if (!cfg) {
-		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
-		if (!cfg)
-			return -ENOMEM;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	int i, ret;

-		iommu_group_set_iommudata(group, cfg,
-					  __arm_smmu_release_pci_iommudata);
+	if (using_legacy_binding) {
+		ret = arm_smmu_register_legacy_master(dev, &smmu);
+		fwspec = dev->iommu_fwspec;
+		if (ret)
+			goto out_free;
+	} else if (fwspec) {
+		smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+	} else {
+		return -ENODEV;
	}

-	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
-		return -ENOSPC;
+	ret = -EINVAL;
+	for (i = 0; i < fwspec->num_ids; i++) {
+		u16 sid = fwspec->ids[i];
+		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

-	/*
-	 * Assume Stream ID == Requester ID for now.
-	 * We need a way to describe the ID mappings in FDT.
-	 */
-	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
-	for (i = 0; i < cfg->num_streamids; ++i)
-		if (cfg->streamids[i] == sid)
-			break;
-
-	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
-	if (i == cfg->num_streamids)
-		cfg->streamids[cfg->num_streamids++] = sid;
-
-	return 0;
-}
-
-static int arm_smmu_init_platform_device(struct device *dev,
-					 struct iommu_group *group)
-{
-	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
-	struct arm_smmu_master *master;
+		if (sid & ~smmu->streamid_mask) {
+			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
+				sid, smmu->streamid_mask);
+			goto out_free;
+		}
+		if (mask & ~smmu->smr_mask_mask) {
+			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
+				mask, smmu->smr_mask_mask);
+			goto out_free;
+		}
+	}

-	if (!smmu)
-		return -ENODEV;
+	ret = -ENOMEM;
+	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
+		      GFP_KERNEL);
+	if (!cfg)
+		goto out_free;

-	master = find_smmu_master(smmu, dev->of_node);
-	if (!master)
-		return -ENODEV;
+	cfg->smmu = smmu;
+	fwspec->iommu_priv = cfg;
+	while (i--)
+		cfg->smendx[i] = INVALID_SMENDX;

-	iommu_group_set_iommudata(group, &master->cfg, NULL);
+	ret = arm_smmu_master_alloc_smes(dev);
+	if (ret)
+		goto out_free;

	return 0;
-}

-static int arm_smmu_add_device(struct device *dev)
-{
-	struct iommu_group *group;
-
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
-
-	iommu_group_put(group);
-	return 0;
+out_free:
+	if (fwspec)
+		kfree(fwspec->iommu_priv);
+	iommu_fwspec_free(dev);
+	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (!fwspec || fwspec->ops != &arm_smmu_ops)
+		return;
+
+	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
+	kfree(fwspec->iommu_priv);
+	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
-	struct iommu_group *group;
-	int ret;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+	struct iommu_group *group = NULL;
+	int i, idx;

-	if (dev_is_pci(dev))
-		group = pci_device_group(dev);
-	else
-		group = generic_device_group(dev);
+	for_each_cfg_sme(fwspec, i, idx) {
+		if (group && smmu->s2crs[idx].group &&
+		    group != smmu->s2crs[idx].group)
+			return ERR_PTR(-EINVAL);
+
+		group = smmu->s2crs[idx].group;
+	}

-	if (IS_ERR(group))
+	if (group)
		return group;

	if (dev_is_pci(dev))
-		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
+		group = pci_device_group(dev);
	else
-		ret = arm_smmu_init_platform_device(dev, group);
-
-	if (ret) {
-		iommu_group_put(group);
-		group = ERR_PTR(ret);
-	}
+		group = generic_device_group(dev);

	return group;
}
@@ -1510,6 +1520,19 @@ out_unlock:
	return ret;
}

+static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+	u32 fwid = 0;
+
+	if (args->args_count > 0)
+		fwid |= (u16)args->args[0];
+
+	if (args->args_count > 1)
+		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+
+	return iommu_fwspec_add_ids(dev, &fwid, 1);
+}
+
static struct iommu_ops arm_smmu_ops = {
	.capable = arm_smmu_capable,
	.domain_alloc = arm_smmu_domain_alloc,
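
arm_smmu_of_xlate() above folds an "iommus" specifier of one or two cells
into a single 32-bit firmware ID: the stream ID sits in the low half-word
and the optional SMR mask above SMR_MASK_SHIFT; arm_smmu_add_device()
splits it back apart with the mirror-image u16 truncation and right shift.
A standalone sketch with invented specifier values:

#include <stdint.h>
#include <stdio.h>

#define SMR_MASK_SHIFT 16

int main(void)
{
	uint32_t args[] = { 0x42, 0x1f };	/* { stream ID, SMR mask } */
	uint32_t fwid = 0;

	fwid |= (uint16_t)args[0];				/* ID in bits [15:0] */
	fwid |= (uint32_t)(uint16_t)args[1] << SMR_MASK_SHIFT;	/* mask in [31:16] */
	printf("fwid = 0x%08x\n", fwid);	/* prints 0x001f0042 */

	/* and the unpacking side, as done when the master is added */
	printf("sid = 0x%x, mask = 0x%x\n",
	       (uint16_t)fwid, (uint16_t)(fwid >> SMR_MASK_SHIFT));
	return 0;
}
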
@@ -1524,6 +1547,7 @@ static struct iommu_ops arm_smmu_ops = {
	.device_group = arm_smmu_device_group,
	.domain_get_attr = arm_smmu_domain_get_attr,
	.domain_set_attr = arm_smmu_domain_set_attr,
+	.of_xlate = arm_smmu_of_xlate,
	.pgsize_bitmap = -1UL, /* Restricted during device attach */
};

@@ -1531,19 +1555,19 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
-	int i = 0;
+	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

-	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
-	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
-	for (i = 0; i < smmu->num_mapping_groups; ++i) {
-		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
-		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
-	}
+	/*
+	 * Reset stream mapping groups: Initial values mark all SMRn as
+	 * invalid and all S2CRn as bypass unless overridden.
+	 */
+	for (i = 0; i < smmu->num_mapping_groups; ++i)
+		arm_smmu_write_sme(smmu, i);

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
@@ -1632,6 +1656,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;
+	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
@@ -1690,39 +1715,55 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

+	/* Max. number of entries we have for stream matching/indexing */
+	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
-		u32 smr, sid, mask;
+		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
-		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
-					   ID0_NUMSMRG_MASK;
-		if (smmu->num_mapping_groups == 0) {
+		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
+		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

-		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
-		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
+		/*
+		 * SMR.ID bits may not be preserved if the corresponding MASK
+		 * bits are set, so check each one separately. We can reject
+		 * masters later if they try to claim IDs outside these masks.
+		 */
+		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

-		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
-		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
-		if ((mask & sid) != sid) {
-			dev_err(smmu->dev,
-				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
-				mask, sid);
-			return -ENODEV;
-		}
+		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
+		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+
+		/* Zero-initialised to mark as invalid */
+		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
+					  GFP_KERNEL);
+		if (!smmu->smrs)
+			return -ENOMEM;

		dev_notice(smmu->dev,
-			   "\tstream matching with %u register groups, mask 0x%x",
-			   smmu->num_mapping_groups, mask);
-	} else {
-		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
-					   ID0_NUMSIDB_MASK;
+			   "\tstream matching with %lu register groups, mask 0x%x",
+			   size, smmu->smr_mask_mask);
	}
+	/* s2cr->type == 0 means translation, so initialise explicitly */
+	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
+					 GFP_KERNEL);
+	if (!smmu->s2crs)
+		return -ENOMEM;
+	for (i = 0; i < size; i++)
+		smmu->s2crs[i] = s2cr_init_val;
+
+	smmu->num_mapping_groups = size;
+	mutex_init(&smmu->stream_map_mutex);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
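
The probe code above discovers how many SMR.ID and SMR.MASK bits the
hardware actually implements by writing all-ones to one field of SMR(0)
and reading back whatever sticks, since implementations may wire fewer
bits than architecturally possible. A standalone sketch of the idiom,
with a fake register keeping only ten bits per field standing in for the
hardware:

#include <stdint.h>
#include <stdio.h>

#define SMR_ID_SHIFT	0
#define SMR_MASK_SHIFT	16

static uint32_t fake_smr0;	/* pretend SMR(0): 10 writable bits per field */

static void fake_writel(uint32_t v) { fake_smr0 = v & 0x03ff03ff; }
static uint32_t fake_readl(void) { return fake_smr0; }

int main(void)
{
	uint16_t streamid_mask = 0x7fff;	/* optimistic starting value */

	fake_writel((uint32_t)streamid_mask << SMR_ID_SHIFT);
	streamid_mask = fake_readl() >> SMR_ID_SHIFT;
	printf("implemented ID bits: 0x%04x\n", streamid_mask);	/* 0x03ff */
	return 0;
}
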
@@ -1855,15 +1896,24 @@ MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
-	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
-	struct rb_node *node;
-	struct of_phandle_iterator it;
-	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;
+	bool legacy_binding;
+
+	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
+	if (legacy_binding && !using_generic_binding) {
+		if (!using_legacy_binding)
+			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
+		using_legacy_binding = true;
+	} else if (!legacy_binding && !using_legacy_binding) {
+		using_generic_binding = true;
+	} else {
+		dev_err(dev, "not probing due to mismatched DT properties\n");
+		return -ENODEV;
+	}

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
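
The binding checks above make the legacy "mmu-masters" description and the
generic "iommus" one mutually exclusive across the whole system: whichever
style probes first wins, and any later SMMU described the other way refuses
to probe. A standalone sketch of that three-way policy (illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool using_legacy, using_generic;

static int probe_one(bool legacy_binding)
{
	if (legacy_binding && !using_generic) {
		using_legacy = true;	/* first legacy probe locks the style */
		return 0;
	} else if (!legacy_binding && !using_legacy) {
		using_generic = true;
		return 0;
	}
	return -1;	/* mismatched DT properties: refuse to probe */
}

int main(void)
{
	printf("%d\n", probe_one(true));	/* 0: legacy locks in       */
	printf("%d\n", probe_one(true));	/* 0: more legacy is fine   */
	printf("%d\n", probe_one(false));	/* -1: generic now refused  */
	return 0;
}
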
@@ -1872,8 +1922,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
	}
	smmu->dev = dev;

-	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
-	data = of_id->data;
+	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

@@ -1923,37 +1972,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
	if (err)
		return err;

-	i = 0;
-	smmu->masters = RB_ROOT;
-
-	err = -ENOMEM;
-	/* No need to zero the memory for masterspec */
-	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
-	if (!masterspec)
-		goto out_put_masters;
-
-	of_for_each_phandle(&it, err, dev->of_node,
-			    "mmu-masters", "#stream-id-cells", 0) {
-		int count = of_phandle_iterator_args(&it, masterspec->args,
-						     MAX_MASTER_STREAMIDS);
-		masterspec->np = of_node_get(it.node);
-		masterspec->args_count = count;
-
-		err = register_smmu_master(smmu, dev, masterspec);
-		if (err) {
-			dev_err(dev, "failed to add master %s\n",
-				masterspec->np->name);
-			kfree(masterspec);
-			goto out_put_masters;
-		}
-
-		i++;
-	}
-
-	dev_notice(dev, "registered %d master devices\n", i);
-
-	kfree(masterspec);
-
	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
@@ -1961,8 +1979,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
-		err = -ENODEV;
-		goto out_put_masters;
+		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
@@ -1974,59 +1991,39 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
-			goto out_put_masters;
+			return err;
		}
	}

-	INIT_LIST_HEAD(&smmu->list);
-	spin_lock(&arm_smmu_devices_lock);
-	list_add(&smmu->list, &arm_smmu_devices);
-	spin_unlock(&arm_smmu_devices_lock);
-
+	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
-	return 0;

-out_put_masters:
-	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
-		struct arm_smmu_master *master
-			= container_of(node, struct arm_smmu_master, node);
-		of_node_put(master->of_node);
+	/* Oh, for a proper bus abstraction */
+	if (!iommu_present(&platform_bus_type))
+		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+#ifdef CONFIG_ARM_AMBA
+	if (!iommu_present(&amba_bustype))
+		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
+#ifdef CONFIG_PCI
+	if (!iommu_present(&pci_bus_type)) {
+		pci_request_acs();
+		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
-
-	return err;
+#endif
+	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
-	int i;
-	struct device *dev = &pdev->dev;
-	struct arm_smmu_device *curr, *smmu = NULL;
-	struct rb_node *node;
-
-	spin_lock(&arm_smmu_devices_lock);
-	list_for_each_entry(curr, &arm_smmu_devices, list) {
-		if (curr->dev == dev) {
-			smmu = curr;
-			list_del(&smmu->list);
-			break;
-		}
-	}
-	spin_unlock(&arm_smmu_devices_lock);
+	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

-	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
-		struct arm_smmu_master *master
-			= container_of(node, struct arm_smmu_master, node);
-		of_node_put(master->of_node);
-	}
-
	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
-		dev_err(dev, "removing device with active domains!\n");
-
-	for (i = 0; i < smmu->num_global_irqs; ++i)
-		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
+		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -2044,41 +2041,14 @@ static struct platform_driver arm_smmu_driver = {

static int __init arm_smmu_init(void)
{
-	struct device_node *np;
-	int ret;
-
-	/*
-	 * Play nice with systems that don't have an ARM SMMU by checking that
-	 * an ARM SMMU exists in the system before proceeding with the driver
-	 * and IOMMU bus operation registration.
-	 */
-	np = of_find_matching_node(NULL, arm_smmu_of_match);
-	if (!np)
-		return 0;
-
-	of_node_put(np);
-
-	ret = platform_driver_register(&arm_smmu_driver);
-	if (ret)
-		return ret;
-
-	/* Oh, for a proper bus abstraction */
-	if (!iommu_present(&platform_bus_type))
-		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-
-#ifdef CONFIG_ARM_AMBA
-	if (!iommu_present(&amba_bustype))
-		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-#endif
+	static bool registered;
+	int ret = 0;

-#ifdef CONFIG_PCI
-	if (!iommu_present(&pci_bus_type)) {
-		pci_request_acs();
-		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+	if (!registered) {
+		ret = platform_driver_register(&arm_smmu_driver);
+		registered = !ret;
	}
-#endif
-
-	return 0;
+	return ret;
}

static void __exit arm_smmu_exit(void)
@@ -2089,6 +2059,25 @@ static void __exit arm_smmu_exit(void)
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

+static int __init arm_smmu_of_init(struct device_node *np)
+{
+	int ret = arm_smmu_init();
+
+	if (ret)
+		return ret;
+
+	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
+		return -ENODEV;
+
+	return 0;
+}
+IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
+IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
+
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");