@@ -27,6 +27,7 @@ struct tegra_smmu {
         const struct tegra_smmu_soc *soc;
 
         unsigned long pfn_mask;
+        unsigned long tlb_mask;
 
         unsigned long *asids;
         struct mutex lock;
@@ -40,8 +41,10 @@ struct tegra_smmu_as {
         struct iommu_domain domain;
         struct tegra_smmu *smmu;
         unsigned int use_count;
-        struct page *count;
+        u32 *count;
+        struct page **pts;
         struct page *pd;
+        dma_addr_t pd_dma;
         unsigned id;
         u32 attr;
 };
@@ -68,7 +71,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_TLB_CONFIG 0x14
 #define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
 #define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
-#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
+#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
+        ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
 
 #define SMMU_PTC_CONFIG 0x18
 #define SMMU_PTC_CONFIG_ENABLE (1 << 29)
@@ -79,9 +83,9 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
 
 #define SMMU_PTB_DATA 0x020
-#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))
+#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))
 
-#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
+#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))
 
 #define SMMU_TLB_FLUSH 0x030
 #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
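
For illustration (not part of the patch): SMMU_PTB_DATA_VALUE() and SMMU_MK_PDE() now take the bus address returned by the DMA API instead of a struct page, shifting the page-aligned address down to a page-frame number and OR-ing in the attribute bits. A minimal sketch with a hypothetical bus address, assuming SMMU_PTE_SHIFT is 12 as elsewhere in this driver:

    /* Illustrative only: hypothetical address, SMMU_PTE_SHIFT assumed to be 12 */
    dma_addr_t pd_dma = 0x80321000;         /* page-aligned address from dma_map_page() */
    u32 attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
    u32 ptb_data = (pd_dma >> 12) | attr;   /* what SMMU_PTB_DATA_VALUE(pd_dma, attr) builds */
    /* (pd_dma >> 12) == 0x80321 is the page-frame number; attr supplies the flag bits */
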
@@ -134,29 +138,49 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
                        SMMU_PTE_NONSECURE)
 
-static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
+static unsigned int iova_pd_index(unsigned long iova)
+{
+        return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
+}
+
+static unsigned int iova_pt_index(unsigned long iova)
+{
+        return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
+}
+
+static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
+{
+        addr >>= 12;
+        return (addr & smmu->pfn_mask) == addr;
+}
+
+static dma_addr_t smmu_pde_to_dma(u32 pde)
+{
+        return pde << 12;
+}
+
+static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
+{
+        smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
+}
+
+static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                   unsigned long offset)
 {
-        phys_addr_t phys = page ? page_to_phys(page) : 0;
         u32 value;
 
-        if (page) {
-                offset &= ~(smmu->mc->soc->atom_size - 1);
+        offset &= ~(smmu->mc->soc->atom_size - 1);
 
-                if (smmu->mc->soc->num_address_bits > 32) {
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-                        value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
+        if (smmu->mc->soc->num_address_bits > 32) {
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+                value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
 #else
-                        value = 0;
+                value = 0;
 #endif
-                        smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
-                }
-
-                value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
-        } else {
-                value = SMMU_PTC_FLUSH_TYPE_ALL;
+                smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
         }
 
+        value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
         smmu_writel(smmu, value, SMMU_PTC_FLUSH);
 }
 
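
The `(iova >> SHIFT) & 0x3ff` arithmetic that was previously open-coded in as_get_pte()/as_put_pte() is now centralized in iova_pd_index() and iova_pt_index(). A worked example (illustrative only; it assumes SMMU_PDE_SHIFT == 22, SMMU_PTE_SHIFT == 12 and 1024-entry tables, the values used elsewhere in this driver):

    /* Illustrative decomposition of one IOVA into table indices */
    unsigned long iova = 0x12345000;
    unsigned int pd_index = (iova >> 22) & 1023;   /* iova_pd_index(iova) == 0x048 */
    unsigned int pt_index = (iova >> 12) & 1023;   /* iova_pt_index(iova) == 0x345 */
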
@@ -236,8 +260,6 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
 static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 {
         struct tegra_smmu_as *as;
-        unsigned int i;
-        uint32_t *pd;
 
         if (type != IOMMU_DOMAIN_UNMANAGED)
                 return NULL;
@@ -248,32 +270,26 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 
         as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
 
-        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
+        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
         if (!as->pd) {
                 kfree(as);
                 return NULL;
         }
 
-        as->count = alloc_page(GFP_KERNEL);
+        as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
         if (!as->count) {
                 __free_page(as->pd);
                 kfree(as);
                 return NULL;
         }
 
-        /* clear PDEs */
-        pd = page_address(as->pd);
-        SetPageReserved(as->pd);
-
-        for (i = 0; i < SMMU_NUM_PDE; i++)
-                pd[i] = 0;
-
-        /* clear PDE usage counters */
-        pd = page_address(as->count);
-        SetPageReserved(as->count);
-
-        for (i = 0; i < SMMU_NUM_PDE; i++)
-                pd[i] = 0;
+        as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
+        if (!as->pts) {
+                kfree(as->count);
+                __free_page(as->pd);
+                kfree(as);
+                return NULL;
+        }
 
         /* setup aperture */
         as->domain.geometry.aperture_start = 0;
@@ -288,7 +304,6 @@ static void tegra_smmu_domain_free(struct iommu_domain *domain)
         struct tegra_smmu_as *as = to_smmu_as(domain);
 
         /* TODO: free page directory and page tables */
-        ClearPageReserved(as->pd);
 
         kfree(as);
 }
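
The "/* TODO: free page directory and page tables */" comment survives this patch, so the per-AS allocations are still not released on domain destruction, as before. A hypothetical follow-up (not part of this patch) would presumably need to free the new kcalloc()'d bookkeeping as well, roughly:

    /* Hypothetical sketch only, not part of this patch */
    static void tegra_smmu_domain_free(struct iommu_domain *domain)
    {
            struct tegra_smmu_as *as = to_smmu_as(domain);

            /* TODO: free any page tables still referenced by as->pts[] */
            kfree(as->pts);
            kfree(as->count);
            __free_page(as->pd);
            kfree(as);
    }
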
@@ -376,16 +391,26 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                 return 0;
         }
 
+        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
+                                  DMA_TO_DEVICE);
+        if (dma_mapping_error(smmu->dev, as->pd_dma))
+                return -ENOMEM;
+
+        /* We can't handle 64-bit DMA addresses */
+        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
+                err = -ENOMEM;
+                goto err_unmap;
+        }
+
         err = tegra_smmu_alloc_asid(smmu, &as->id);
         if (err < 0)
-                return err;
+                goto err_unmap;
 
-        smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
-        smmu_flush_ptc(smmu, as->pd, 0);
+        smmu_flush_ptc(smmu, as->pd_dma, 0);
         smmu_flush_tlb_asid(smmu, as->id);
 
         smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
-        value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
+        value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
         smmu_writel(smmu, value, SMMU_PTB_DATA);
         smmu_flush(smmu);
 
@@ -393,6 +418,10 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
         as->use_count++;
 
         return 0;
+
+err_unmap:
+        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+        return err;
 }
 
 static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
@@ -402,6 +431,9 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                 return;
 
         tegra_smmu_free_asid(smmu, as->id);
+
+        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+
         as->smmu = NULL;
 }
 
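
The page directory is still allocated with alloc_page(), but it is now handed to the hardware through the streaming DMA API: map it, check dma_mapping_error(), check that the shifted address still fits within smmu->pfn_mask via smmu_dma_addr_valid(), and release it again with dma_unmap_page() on any failure or when the address space is torn down. A worked example of the validity check, assuming a SoC with num_address_bits == 32 (so pfn_mask == 0xfffff):

    /* Illustrative only: assumes num_address_bits == 32, i.e. pfn_mask == 0xfffff */
    dma_addr_t ok  = 0xfffff000;        /* >> 12 == 0xfffff, within pfn_mask: accepted  */
    dma_addr_t bad = 0x100000000ULL;    /* >> 12 == 0x100000, exceeds pfn_mask: rejected */
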
@@ -465,96 +497,155 @@ static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *de
         }
 }
 
+static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
+                               u32 value)
+{
+        unsigned int pd_index = iova_pd_index(iova);
+        struct tegra_smmu *smmu = as->smmu;
+        u32 *pd = page_address(as->pd);
+        unsigned long offset = pd_index * sizeof(*pd);
+
+        /* Set the page directory entry first */
+        pd[pd_index] = value;
+
+        /* Then flush the page directory entry from caches */
+        dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
+                                         sizeof(*pd), DMA_TO_DEVICE);
+
+        /* And flush the iommu */
+        smmu_flush_ptc(smmu, as->pd_dma, offset);
+        smmu_flush_tlb_section(smmu, as->id, iova);
+        smmu_flush(smmu);
+}
+
+static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+{
+        u32 *pt = page_address(pt_page);
+
+        return pt + iova_pt_index(iova);
+}
+
+static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
+                                  dma_addr_t *dmap)
+{
+        unsigned int pd_index = iova_pd_index(iova);
+        struct page *pt_page;
+        u32 *pd;
+
+        pt_page = as->pts[pd_index];
+        if (!pt_page)
+                return NULL;
+
+        pd = page_address(as->pd);
+        *dmap = smmu_pde_to_dma(pd[pd_index]);
+
+        return tegra_smmu_pte_offset(pt_page, iova);
+}
+
 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
-                       struct page **pagep)
+                       dma_addr_t *dmap)
 {
-        u32 *pd = page_address(as->pd), *pt, *count;
-        u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
-        u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
+        unsigned int pde = iova_pd_index(iova);
         struct tegra_smmu *smmu = as->smmu;
-        struct page *page;
-        unsigned int i;
 
-        if (pd[pde] == 0) {
-                page = alloc_page(GFP_KERNEL | __GFP_DMA);
+        if (!as->pts[pde]) {
+                struct page *page;
+                dma_addr_t dma;
+
+                page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
                 if (!page)
                         return NULL;
 
-                pt = page_address(page);
-                SetPageReserved(page);
+                dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
+                                   DMA_TO_DEVICE);
+                if (dma_mapping_error(smmu->dev, dma)) {
+                        __free_page(page);
+                        return NULL;
+                }
 
-                for (i = 0; i < SMMU_NUM_PTE; i++)
-                        pt[i] = 0;
+                if (!smmu_dma_addr_valid(smmu, dma)) {
+                        dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
+                                       DMA_TO_DEVICE);
+                        __free_page(page);
+                        return NULL;
+                }
 
-                smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
+                as->pts[pde] = page;
 
-                pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
+                tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
+                                                         SMMU_PDE_NEXT));
 
-                smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
-                smmu_flush_ptc(smmu, as->pd, pde << 2);
-                smmu_flush_tlb_section(smmu, as->id, iova);
-                smmu_flush(smmu);
+                *dmap = dma;
         } else {
-                page = pfn_to_page(pd[pde] & smmu->pfn_mask);
-                pt = page_address(page);
+                u32 *pd = page_address(as->pd);
+
+                *dmap = smmu_pde_to_dma(pd[pde]);
         }
 
-        *pagep = page;
+        return tegra_smmu_pte_offset(as->pts[pde], iova);
+}
 
-        /* Keep track of entries in this page table. */
-        count = page_address(as->count);
-        if (pt[pte] == 0)
-                count[pde]++;
+static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
+{
+        unsigned int pd_index = iova_pd_index(iova);
 
-        return &pt[pte];
+        as->count[pd_index]++;
 }
 
-static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
+static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 {
-        u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
-        u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
-        u32 *count = page_address(as->count);
-        u32 *pd = page_address(as->pd), *pt;
-        struct page *page;
-
-        page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
-        pt = page_address(page);
+        unsigned int pde = iova_pd_index(iova);
+        struct page *page = as->pts[pde];
 
         /*
          * When no entries in this page table are used anymore, return the
          * memory page to the system.
          */
-        if (pt[pte] != 0) {
-                if (--count[pde] == 0) {
-                        ClearPageReserved(page);
-                        __free_page(page);
-                        pd[pde] = 0;
-                }
+        if (--as->count[pde] == 0) {
+                struct tegra_smmu *smmu = as->smmu;
+                u32 *pd = page_address(as->pd);
+                dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+
+                tegra_smmu_set_pde(as, iova, 0);
 
-                pt[pte] = 0;
+                dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
+                __free_page(page);
+                as->pts[pde] = NULL;
         }
 }
 
+static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
+                               u32 *pte, dma_addr_t pte_dma, u32 val)
+{
+        struct tegra_smmu *smmu = as->smmu;
+        unsigned long offset = offset_in_page(pte);
+
+        *pte = val;
+
+        dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
+                                         4, DMA_TO_DEVICE);
+        smmu_flush_ptc(smmu, pte_dma, offset);
+        smmu_flush_tlb_group(smmu, as->id, iova);
+        smmu_flush(smmu);
+}
+
 static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                           phys_addr_t paddr, size_t size, int prot)
 {
         struct tegra_smmu_as *as = to_smmu_as(domain);
-        struct tegra_smmu *smmu = as->smmu;
-        unsigned long offset;
-        struct page *page;
+        dma_addr_t pte_dma;
         u32 *pte;
 
-        pte = as_get_pte(as, iova, &page);
+        pte = as_get_pte(as, iova, &pte_dma);
         if (!pte)
                 return -ENOMEM;
 
-        *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
-        offset = offset_in_page(pte);
+        /* If we aren't overwriting a pre-existing entry, increment use */
+        if (*pte == 0)
+                tegra_smmu_pte_get_use(as, iova);
 
-        smmu->soc->ops->flush_dcache(page, offset, 4);
-        smmu_flush_ptc(smmu, page, offset);
-        smmu_flush_tlb_group(smmu, as->id, iova);
-        smmu_flush(smmu);
+        tegra_smmu_set_pte(as, iova, pte, pte_dma,
+                           __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
 
         return 0;
 }
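
Both new write paths follow the same ordering: update the entry through its CPU mapping, push it out with dma_sync_single_range_for_device(), then flush the SMMU's PTC line and the affected TLB entries and finish with smmu_flush(). The other piece is the use counting: tegra_smmu_map() only bumps the per-directory counter when it writes a previously empty PTE, so a page table is unmapped and freed exactly when its last entry goes away. An illustrative sequence (not part of the patch; phys0, phys1 and prot are placeholders, and the 0x40 index assumes SMMU_PDE_SHIFT == 22):

    /* Illustrative: two IOVAs 4 KiB apart share one page table */
    tegra_smmu_map(domain, 0x10000000, phys0, SZ_4K, prot);  /* as->count[0x40] == 1 */
    tegra_smmu_map(domain, 0x10001000, phys1, SZ_4K, prot);  /* as->count[0x40] == 2 */
    tegra_smmu_unmap(domain, 0x10000000, SZ_4K);             /* as->count[0x40] == 1 */
    tegra_smmu_unmap(domain, 0x10001000, SZ_4K);             /* 0: table unmapped and freed */
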
@@ -563,22 +654,15 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                                size_t size)
 {
         struct tegra_smmu_as *as = to_smmu_as(domain);
-        struct tegra_smmu *smmu = as->smmu;
-        unsigned long offset;
-        struct page *page;
+        dma_addr_t pte_dma;
         u32 *pte;
 
-        pte = as_get_pte(as, iova, &page);
-        if (!pte)
+        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+        if (!pte || !*pte)
                 return 0;
 
-        offset = offset_in_page(pte);
-        as_put_pte(as, iova);
-
-        smmu->soc->ops->flush_dcache(page, offset, 4);
-        smmu_flush_ptc(smmu, page, offset);
-        smmu_flush_tlb_group(smmu, as->id, iova);
-        smmu_flush(smmu);
+        tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
+        tegra_smmu_pte_put_use(as, iova);
 
         return size;
 }
@@ -587,11 +671,14 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                            dma_addr_t iova)
 {
         struct tegra_smmu_as *as = to_smmu_as(domain);
-        struct page *page;
         unsigned long pfn;
+        dma_addr_t pte_dma;
         u32 *pte;
 
-        pte = as_get_pte(as, iova, &page);
+        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+        if (!pte || !*pte)
+                return 0;
+
         pfn = *pte & as->smmu->pfn_mask;
 
         return PFN_PHYS(pfn);
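
tegra_smmu_iova_to_phys() now returns 0 for an unpopulated address instead of allocating a page table as a side effect of the lookup. The translation itself is unchanged: mask the PTE down to its page-frame number and shift it back up. Illustrative numbers (the attribute bit positions and a 20-bit pfn_mask are assumptions):

    /* Illustrative only */
    u32 pte = 0xe0012345;                         /* attribute bits plus PFN 0x12345 */
    unsigned long pfn = pte & 0xfffff;            /* *pte & as->smmu->pfn_mask       */
    phys_addr_t phys = (phys_addr_t)pfn << 12;    /* PFN_PHYS(pfn) == 0x12345000     */
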
@@ -816,6 +903,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
         smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
         dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                 mc->soc->num_address_bits, smmu->pfn_mask);
+        smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+        dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
+                smmu->tlb_mask);
 
         value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
 
@@ -825,14 +915,14 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
         smmu_writel(smmu, value, SMMU_PTC_CONFIG);
 
         value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
-                SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
+                SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
 
         if (soc->supports_round_robin_arbitration)
                 value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
 
         smmu_writel(smmu, value, SMMU_TLB_CONFIG);
 
-        smmu_flush_ptc(smmu, NULL, 0);
+        smmu_flush_ptc_all(smmu);
         smmu_flush_tlb(smmu);
         smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
         smmu_flush(smmu);
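
The hard-coded ACTIVE_LINES mask 0x3f and value 0x20 are replaced by per-SoC data. For a SoC with 0x20 TLB lines (the value the old code assumed) the programmed register value is unchanged; parts with a different line count now get a correctly sized mask. The arithmetic, with an illustrative line count:

    /* Illustrative: a SoC with 0x20 TLB lines reproduces the old constants */
    unsigned int num_tlb_lines = 0x20;
    unsigned long tlb_mask = (num_tlb_lines << 1) - 1;   /* 0x3f, the old field mask         */
    u32 active = num_tlb_lines & tlb_mask;               /* 0x20, the old ACTIVE_LINES(0x20) */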