@@ -70,6 +70,36 @@ static short PG_ENT_SHIFT = -1;
 #define SYSMMU_PG_ENT_SHIFT		0
 #define SYSMMU_V5_PG_ENT_SHIFT		4
 
+static const sysmmu_pte_t *LV1_PROT;
+static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
+	((0 << 15) | (0 << 10)), /* no access */
+	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
+	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
+	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
+};
+static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
+	(0 << 4), /* no access */
+	(1 << 4), /* IOMMU_READ only */
+	(2 << 4), /* IOMMU_WRITE only */
+	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
+};
+
+static const sysmmu_pte_t *LV2_PROT;
+static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
+	((0 << 9) | (0 << 4)), /* no access */
+	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
+	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
+	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
+};
+static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
+	(0 << 2), /* no access */
+	(1 << 2), /* IOMMU_READ only */
+	(2 << 2), /* IOMMU_WRITE only */
+	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
+};
+
+#define SYSMMU_SUPPORTED_PROT_BITS	(IOMMU_READ | IOMMU_WRITE)
+
 #define sect_to_phys(ent)	(((phys_addr_t) ent) << PG_ENT_SHIFT)
 #define section_phys(sent)	(sect_to_phys(*(sent)) & SECT_MASK)
 #define section_offs(iova)	(iova & (SECT_SIZE - 1))
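The table pairs above turn the IOMMU API's two-bit prot value (IOMMU_READ is bit 0, IOMMU_WRITE is bit 1) directly into the access-permission bits of the v1-v3 and v5 page-table formats, using prot itself as the array index. A minimal standalone sketch of that indexing, with the constants redefined locally for illustration (the IOMMU_* values mirror include/linux/iommu.h):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t sysmmu_pte_t;

#define IOMMU_READ	(1 << 0)	/* mirrors include/linux/iommu.h */
#define IOMMU_WRITE	(1 << 1)

/* v5 first-level table, copied from the hunk above */
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), (1 << 4), (2 << 4), (3 << 4),
};

int main(void)
{
	int prot = IOMMU_READ | IOMMU_WRITE;	/* == 3 */

	/* prot is the table index; the AP field lands in bits 5:4 */
	printf("AP bits: 0x%x\n", (unsigned)SYSMMU_V5_LV1_PROT[prot]);
	return 0;
}

Note that the v1-v3 format cannot express write-only mappings, so index 2 (IOMMU_WRITE alone) falls back to the read/write encoding, as the in-table comments state.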
@@ -97,16 +127,17 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
 #define SPAGES_PER_LPAGE	(LPAGE_SIZE / SPAGE_SIZE)
 #define lv2table_base(sent)	(sect_to_phys(*(sent) & 0xFFFFFFC0))
 
-#define mk_lv1ent_sect(pa)	((pa >> PG_ENT_SHIFT) | 2)
+#define mk_lv1ent_sect(pa, prot)	((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
 #define mk_lv1ent_page(pa)	((pa >> PG_ENT_SHIFT) | 1)
-#define mk_lv2ent_lpage(pa)	((pa >> PG_ENT_SHIFT) | 1)
-#define mk_lv2ent_spage(pa)	((pa >> PG_ENT_SHIFT) | 2)
+#define mk_lv2ent_lpage(pa, prot)	((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
+#define mk_lv2ent_spage(pa, prot)	((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
 
 #define CTRL_ENABLE	0x5
 #define CTRL_BLOCK	0x7
 #define CTRL_DISABLE	0x0
 
 #define CFG_LRU		0x1
+#define CFG_EAP		(1 << 2)
 #define CFG_QOS(n)	((n & 0xF) << 7)
 #define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
 #define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
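The reworked mk_* macros compose a page-table entry from three pieces: the (possibly shifted) physical address, the looked-up protection bits, and the entry-type bits in the low two positions. A self-contained sketch of the composition for a v1-v3 first-level section entry, with the table and macro copied from the hunks above and the printed value purely illustrative:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t sysmmu_pte_t;
typedef uint64_t phys_addr_t;

#define PG_ENT_SHIFT	0	/* v1-v3: address used as-is */

static const sysmmu_pte_t LV1_PROT[] = {
	((0 << 15) | (0 << 10)), ((1 << 15) | (1 << 10)),
	((0 << 15) | (1 << 10)), ((0 << 15) | (1 << 10)),
};

#define mk_lv1ent_sect(pa, prot) \
	(((pa) >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)	/* "| 2": section */

int main(void)
{
	phys_addr_t pa = 0x40000000;	/* 1 MiB-aligned section */

	/* prot == 1 (IOMMU_READ only) -> 0x40000000 | 0x8400 | 2 */
	printf("pte: 0x%llx\n", (unsigned long long)mk_lv1ent_sect(pa, 1));
	return 0;
}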
@@ -206,6 +237,7 @@ static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
 struct exynos_iommu_owner {
 	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
 	struct iommu_domain *domain;	/* domain this device is attached */
+	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
 };
 
 /*
@@ -237,8 +269,8 @@ struct sysmmu_drvdata {
 	struct clk *aclk;		/* SYSMMU's aclk clock */
 	struct clk *pclk;		/* SYSMMU's pclk clock */
 	struct clk *clk_master;		/* master's device clock */
-	int activations;		/* number of calls to sysmmu_enable */
 	spinlock_t lock;		/* lock for modyfying state */
+	bool active;			/* current status */
 	struct exynos_iommu_domain *domain; /* domain we belong to */
 	struct list_head domain_node;	/* node for domain clients list */
 	struct list_head owner_node;	/* node for owner controllers list */
@@ -251,25 +283,6 @@ static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
 	return container_of(dom, struct exynos_iommu_domain, domain);
 }
 
-static bool set_sysmmu_active(struct sysmmu_drvdata *data)
-{
-	/* return true if the System MMU was not active previously
-	   and it needs to be initialized */
-	return ++data->activations == 1;
-}
-
-static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
-{
-	/* return true if the System MMU is needed to be disabled */
-	BUG_ON(data->activations < 1);
-	return --data->activations == 0;
-}
-
-static bool is_sysmmu_active(struct sysmmu_drvdata *data)
-{
-	return data->activations > 0;
-}
-
 static void sysmmu_unblock(struct sysmmu_drvdata *data)
 {
 	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
@@ -388,7 +401,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 	unsigned short reg_status, reg_clear;
 	int ret = -ENOSYS;
 
-	WARN_ON(!is_sysmmu_active(data));
+	WARN_ON(!data->active);
 
 	if (MMU_MAJ_VER(data->version) < 5) {
 		reg_status = REG_INT_STATUS;
@@ -434,40 +447,19 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
+static void __sysmmu_disable(struct sysmmu_drvdata *data)
 {
+	unsigned long flags;
+
 	clk_enable(data->clk_master);
 
+	spin_lock_irqsave(&data->lock, flags);
 	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
 	writel(0, data->sfrbase + REG_MMU_CFG);
-
-	__sysmmu_disable_clocks(data);
-}
-
-static bool __sysmmu_disable(struct sysmmu_drvdata *data)
-{
-	bool disabled;
-	unsigned long flags;
-
-	spin_lock_irqsave(&data->lock, flags);
-
-	disabled = set_sysmmu_inactive(data);
-
-	if (disabled) {
-		data->pgtable = 0;
-		data->domain = NULL;
-
-		__sysmmu_disable_nocount(data);
-
-		dev_dbg(data->sysmmu, "Disabled\n");
-	} else {
-		dev_dbg(data->sysmmu, "%d times left to disable\n",
-			data->activations);
-	}
-
+	data->active = false;
 	spin_unlock_irqrestore(&data->lock, flags);
 
-	return disabled;
+	__sysmmu_disable_clocks(data);
 }
 
 static void __sysmmu_init_config(struct sysmmu_drvdata *data)
@@ -481,20 +473,24 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
 	else
 		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
 
+	cfg |= CFG_EAP; /* enable access protection bits check */
+
 	writel(cfg, data->sfrbase + REG_MMU_CFG);
 }
 
-static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
+static void __sysmmu_enable(struct sysmmu_drvdata *data)
 {
+	unsigned long flags;
+
 	__sysmmu_enable_clocks(data);
 
+	spin_lock_irqsave(&data->lock, flags);
 	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
-
 	__sysmmu_init_config(data);
-
 	__sysmmu_set_ptbase(data, data->pgtable);
-
 	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
+	data->active = true;
+	spin_unlock_irqrestore(&data->lock, flags);
 
 	/*
 	 * SYSMMU driver keeps master's clock enabled only for the short
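The enable path now programs the hardware in one locked sequence: block translation, write the configuration (which includes the new CFG_EAP bit so the AP fields stored in each entry are actually checked), set the page-table base, then re-enable. A rough standalone sketch of that ordering; the register offsets here are placeholders, not the driver's real REG_* values:

#include <stdint.h>

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CFG_EAP		(1 << 2)

/* stand-in for writel(); offsets below are illustrative only */
static inline void mmio_write(volatile uint32_t *base, unsigned int off,
			      uint32_t val)
{
	base[off / 4] = val;
}

static void mmu_enable(volatile uint32_t *sfrbase, uint32_t cfg,
		       uint32_t pgtable_pa)
{
	mmio_write(sfrbase, 0x0, CTRL_BLOCK);	 /* stall translation */
	mmio_write(sfrbase, 0x4, cfg | CFG_EAP); /* enforce AP bits */
	mmio_write(sfrbase, 0x8, pgtable_pa);	 /* first-level table base */
	mmio_write(sfrbase, 0x0, CTRL_ENABLE);	 /* resume translation */
}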
@@ -505,48 +501,18 @@ static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
 	clk_disable(data->clk_master);
 }
 
-static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
-			   struct exynos_iommu_domain *domain)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&data->lock, flags);
-	if (set_sysmmu_active(data)) {
-		data->pgtable = pgtable;
-		data->domain = domain;
-
-		__sysmmu_enable_nocount(data);
-
-		dev_dbg(data->sysmmu, "Enabled\n");
-	} else {
-		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;
-
-		dev_dbg(data->sysmmu, "already enabled\n");
-	}
-
-	if (WARN_ON(ret < 0))
-		set_sysmmu_inactive(data); /* decrement count */
-
-	spin_unlock_irqrestore(&data->lock, flags);
-
-	return ret;
-}
-
 static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
 					    sysmmu_iova_t iova)
 {
 	unsigned long flags;
 
-
 	spin_lock_irqsave(&data->lock, flags);
-	if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
+	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
 		clk_enable(data->clk_master);
 		__sysmmu_tlb_invalidate_entry(data, iova, 1);
 		clk_disable(data->clk_master);
 	}
 	spin_unlock_irqrestore(&data->lock, flags);
-
 }
 
 static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -555,7 +521,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 	unsigned long flags;
 
 	spin_lock_irqsave(&data->lock, flags);
-	if (is_sysmmu_active(data)) {
+	if (data->active) {
 		unsigned int num_inv = 1;
 
 		clk_enable(data->clk_master);
@@ -578,9 +544,6 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 			sysmmu_unblock(data);
 		}
 		clk_disable(data->clk_master);
-	} else {
-		dev_dbg(data->master,
-			"disabled. Skipping TLB invalidation @ %#x\n", iova);
 	}
 	spin_unlock_irqrestore(&data->lock, flags);
 }
@@ -652,10 +615,15 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
 
 	__sysmmu_get_version(data);
 	if (PG_ENT_SHIFT < 0) {
-		if (MMU_MAJ_VER(data->version) < 5)
+		if (MMU_MAJ_VER(data->version) < 5) {
 			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
-		else
+			LV1_PROT = SYSMMU_LV1_PROT;
+			LV2_PROT = SYSMMU_LV2_PROT;
+		} else {
 			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
+			LV1_PROT = SYSMMU_V5_LV1_PROT;
+			LV2_PROT = SYSMMU_V5_LV2_PROT;
+		}
 	}
 
 	pm_runtime_enable(dev);
@@ -665,34 +633,46 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int exynos_sysmmu_suspend(struct device *dev)
+static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
 {
 	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+	struct device *master = data->master;
+
+	if (master) {
+		struct exynos_iommu_owner *owner = master->archdata.iommu;
 
-	dev_dbg(dev, "suspend\n");
-	if (is_sysmmu_active(data)) {
-		__sysmmu_disable_nocount(data);
-		pm_runtime_put(dev);
+		mutex_lock(&owner->rpm_lock);
+		if (data->domain) {
+			dev_dbg(data->sysmmu, "saving state\n");
+			__sysmmu_disable(data);
+		}
+		mutex_unlock(&owner->rpm_lock);
 	}
 	return 0;
 }
 
-static int exynos_sysmmu_resume(struct device *dev)
+static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
 {
 	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+	struct device *master = data->master;
+
+	if (master) {
+		struct exynos_iommu_owner *owner = master->archdata.iommu;
 
-	dev_dbg(dev, "resume\n");
-	if (is_sysmmu_active(data)) {
-		pm_runtime_get_sync(dev);
-		__sysmmu_enable_nocount(data);
+		mutex_lock(&owner->rpm_lock);
+		if (data->domain) {
+			dev_dbg(data->sysmmu, "restoring state\n");
+			__sysmmu_enable(data);
+		}
+		mutex_unlock(&owner->rpm_lock);
 	}
 	return 0;
 }
-#endif
 
 static const struct dev_pm_ops sysmmu_pm_ops = {
-	SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
+	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
 };
 
 static const struct of_device_id sysmmu_of_match[] __initconst = {
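With this hunk the suspend/resume handlers stop being system-sleep callbacks guarded by CONFIG_PM_SLEEP and become runtime PM callbacks; system sleep is then routed through pm_runtime_force_suspend()/pm_runtime_force_resume(), which invoke the same pair, and __maybe_unused keeps the build quiet when the PM config leaves them unreferenced. The resulting wiring as a generic pattern (the foo_* names are hypothetical; the SET_* macros and force helpers are the stock kernel ones):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	return 0;	/* save state / gate clocks here */
}

static int foo_runtime_resume(struct device *dev)
{
	return 0;	/* restore state here */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	/* system sleep simply forces the runtime PM transitions */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};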
@@ -796,9 +776,12 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
 	spin_lock_irqsave(&domain->lock, flags);
 
 	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
-		if (__sysmmu_disable(data))
-			data->master = NULL;
+		spin_lock(&data->lock);
+		__sysmmu_disable(data);
+		data->pgtable = 0;
+		data->domain = NULL;
 		list_del_init(&data->domain_node);
+		spin_unlock(&data->lock);
 	}
 
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -832,31 +815,34 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
 	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
 	struct sysmmu_drvdata *data, *next;
 	unsigned long flags;
-	bool found = false;
 
 	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
 		return;
 
+	mutex_lock(&owner->rpm_lock);
+
+	list_for_each_entry(data, &owner->controllers, owner_node) {
+		pm_runtime_get_noresume(data->sysmmu);
+		if (pm_runtime_active(data->sysmmu))
+			__sysmmu_disable(data);
+		pm_runtime_put(data->sysmmu);
+	}
+
 	spin_lock_irqsave(&domain->lock, flags);
 	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
-		if (data->master == dev) {
-			if (__sysmmu_disable(data)) {
-				data->master = NULL;
-				list_del_init(&data->domain_node);
-			}
-			pm_runtime_put(data->sysmmu);
-			found = true;
-		}
+		spin_lock(&data->lock);
+		data->pgtable = 0;
+		data->domain = NULL;
+		list_del_init(&data->domain_node);
+		spin_unlock(&data->lock);
 	}
+	owner->domain = NULL;
 	spin_unlock_irqrestore(&domain->lock, flags);
 
-	owner->domain = NULL;
+	mutex_unlock(&owner->rpm_lock);
 
-	if (found)
-		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
-			__func__, &pagetable);
-	else
-		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
+	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
+		&pagetable);
 }
 
 static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
@@ -867,7 +853,6 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
 	struct sysmmu_drvdata *data;
 	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
 	unsigned long flags;
-	int ret = -ENODEV;
 
 	if (!has_sysmmu(dev))
 		return -ENODEV;
@@ -875,29 +860,32 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
 	if (owner->domain)
 		exynos_iommu_detach_device(owner->domain, dev);
 
+	mutex_lock(&owner->rpm_lock);
+
+	spin_lock_irqsave(&domain->lock, flags);
 	list_for_each_entry(data, &owner->controllers, owner_node) {
-		pm_runtime_get_sync(data->sysmmu);
-		ret = __sysmmu_enable(data, pagetable, domain);
-		if (ret >= 0) {
-			data->master = dev;
-
-			spin_lock_irqsave(&domain->lock, flags);
-			list_add_tail(&data->domain_node, &domain->clients);
-			spin_unlock_irqrestore(&domain->lock, flags);
-		}
+		spin_lock(&data->lock);
+		data->pgtable = pagetable;
+		data->domain = domain;
+		list_add_tail(&data->domain_node, &domain->clients);
+		spin_unlock(&data->lock);
 	}
+	owner->domain = iommu_domain;
+	spin_unlock_irqrestore(&domain->lock, flags);
 
-	if (ret < 0) {
-		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
-			__func__, &pagetable);
-		return ret;
+	list_for_each_entry(data, &owner->controllers, owner_node) {
+		pm_runtime_get_noresume(data->sysmmu);
+		if (pm_runtime_active(data->sysmmu))
+			__sysmmu_enable(data);
+		pm_runtime_put(data->sysmmu);
 	}
 
-	owner->domain = iommu_domain;
-	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
-		__func__, &pagetable, (ret == 0) ? "" : ", again");
+	mutex_unlock(&owner->rpm_lock);
 
-	return ret;
+	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
+		&pagetable);
+
+	return 0;
 }
 
 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
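Attach now happens in two phases: the software state (pgtable, domain, client list, owner->domain) is updated first under the spinlocks, and only then is the hardware touched, for those SYSMMUs that are currently powered. pm_runtime_get_noresume() pins the runtime PM state without waking the device, so a suspended SYSMMU is left alone and gets programmed later by the runtime-resume callback. The idiom in isolation, where foo_hw_enable() is a hypothetical stand-in for __sysmmu_enable():

#include <linux/pm_runtime.h>

static void foo_hw_enable(struct device *dev)
{
	/* program the IP's registers */
}

static void foo_enable_if_powered(struct device *dev)
{
	pm_runtime_get_noresume(dev);	/* pin the current RPM state */
	if (pm_runtime_active(dev))	/* powered: program it now */
		foo_hw_enable(dev);
	/* suspended: the runtime-resume callback programs it later */
	pm_runtime_put(dev);
}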
@@ -954,7 +942,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
 
 static int lv1set_section(struct exynos_iommu_domain *domain,
 			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
-			  phys_addr_t paddr, short *pgcnt)
+			  phys_addr_t paddr, int prot, short *pgcnt)
 {
 	if (lv1ent_section(sent)) {
 		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
@@ -973,7 +961,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
 		*pgcnt = 0;
 	}
 
-	update_pte(sent, mk_lv1ent_sect(paddr));
+	update_pte(sent, mk_lv1ent_sect(paddr, prot));
 
 	spin_lock(&domain->lock);
 	if (lv1ent_page_zero(sent)) {
@@ -991,13 +979,13 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
 }
 
 static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
-		       short *pgcnt)
+		       int prot, short *pgcnt)
 {
 	if (size == SPAGE_SIZE) {
 		if (WARN_ON(!lv2ent_fault(pent)))
 			return -EADDRINUSE;
 
-		update_pte(pent, mk_lv2ent_spage(paddr));
+		update_pte(pent, mk_lv2ent_spage(paddr, prot));
 		*pgcnt -= 1;
 	} else { /* size == LPAGE_SIZE */
 		int i;
@@ -1013,7 +1001,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 			return -EADDRINUSE;
 		}
 
-		*pent = mk_lv2ent_lpage(paddr);
+		*pent = mk_lv2ent_lpage(paddr, prot);
 	}
 	dma_sync_single_for_device(dma_dev, pent_base,
 				   sizeof(*pent) * SPAGES_PER_LPAGE,
@@ -1061,13 +1049,14 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
 	int ret = -ENOMEM;
 
 	BUG_ON(domain->pgtable == NULL);
+	prot &= SYSMMU_SUPPORTED_PROT_BITS;
 
 	spin_lock_irqsave(&domain->pgtablelock, flags);
 
 	entry = section_entry(domain->pgtable, iova);
 
 	if (size == SECT_SIZE) {
-		ret = lv1set_section(domain, entry, iova, paddr,
+		ret = lv1set_section(domain, entry, iova, paddr, prot,
 				     &domain->lv2entcnt[lv1ent_offset(iova)]);
 	} else {
 		sysmmu_pte_t *pent;
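Masking prot before use matters because prot doubles as the index into the four-entry LV1_PROT/LV2_PROT tables: callers may pass extra flags such as IOMMU_CACHE or IOMMU_NOEXEC, which would otherwise index past the end of the arrays. A small sketch (IOMMU_* values mirror include/linux/iommu.h):

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2)
#define SYSMMU_SUPPORTED_PROT_BITS	(IOMMU_READ | IOMMU_WRITE)

/* e.g. prot == IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE == 7:
 * LV1_PROT[7] would be out of bounds; after masking the index is 3. */
static int prot_index(int prot)
{
	return prot & SYSMMU_SUPPORTED_PROT_BITS;
}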
@@ -1078,7 +1067,7 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
 		if (IS_ERR(pent))
 			ret = PTR_ERR(pent);
 		else
-			ret = lv2set_page(pent, paddr, size,
+			ret = lv2set_page(pent, paddr, size, prot,
 					  &domain->lv2entcnt[lv1ent_offset(iova)]);
 	}
 
@@ -1268,10 +1257,20 @@ static int exynos_iommu_of_xlate(struct device *dev,
 			return -ENOMEM;
 
 		INIT_LIST_HEAD(&owner->controllers);
+		mutex_init(&owner->rpm_lock);
 		dev->archdata.iommu = owner;
 	}
 
 	list_add_tail(&data->owner_node, &owner->controllers);
+	data->master = dev;
+
+	/*
+	 * SYSMMU will be runtime activated via device link (dependency) to its
+	 * master device, so there are no direct calls to pm_runtime_get/put
+	 * in this driver.
+	 */
+	device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);
+
 	return 0;
 }
 
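The device link added in the final hunk makes each SYSMMU a runtime PM supplier of its master: when the master runtime-resumes, the PM core resumes the SYSMMU first, and its runtime callbacks (reworked above) restore or save the translation state. That is why the driver needs no explicit pm_runtime_get/put calls of its own. The supplier/consumer pattern in isolation, with foo_bind() and its parameters hypothetical:

#include <linux/device.h>

static int foo_bind(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/* PM core will runtime-resume 'supplier' before 'consumer' */
	link = device_link_add(consumer, supplier, DL_FLAG_PM_RUNTIME);
	return link ? 0 : -ENODEV;
}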