|
@@ -113,6 +113,8 @@ static inline void __tlb_entry_erase(void)
|
|
|
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
|
|
|
}
|
|
|
|
|
|
+#if (CONFIG_ARC_MMU_VER < 4)
|
|
|
+
|
|
|
static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
|
|
|
{
|
|
|
unsigned int idx;
|
|
@@ -210,6 +212,28 @@ static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
|
|
|
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
|
|
|
}
|
|
|
|
|
|
+#else /* CONFIG_ARC_MMU_VER >= 4 */
|
|
|
+
|
|
|
+static void utlb_invalidate(void)
|
|
|
+{
|
|
|
+ /* No need since uTLB is always in sync with JTLB */
|
|
|
+}
|
|
|
+
|
|
|
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
|
|
|
+{
|
|
|
+ write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
|
|
|
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
|
|
|
+}
|
|
|
+
|
|
|
+static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
|
|
|
+{
|
|
|
+ write_aux_reg(ARC_REG_TLBPD0, pd0);
|
|
|
+ write_aux_reg(ARC_REG_TLBPD1, pd1);
|
|
|
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
|
|
|
+}
|
|
|
+
|
|
|
+#endif
|
|
|
+
|
|
|
/*
|
|
|
* Un-conditionally (without lookup) erase the entire MMU contents
|
|
|
*/
|
|
@@ -582,6 +606,17 @@ void read_decode_mmu_bcr(void)
|
|
|
#endif
|
|
|
} *mmu3;
|
|
|
|
|
|
+ struct bcr_mmu_4 {
|
|
|
+#ifdef CONFIG_CPU_BIG_ENDIAN
|
|
|
+ unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
|
|
|
+ n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
|
|
|
+#else
|
|
|
+ /* DTLB ITLB JES JE JA */
|
|
|
+ unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
|
|
|
+ pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
|
|
|
+#endif
|
|
|
+ } *mmu4;
|
|
|
+
|
|
|
tmp = read_aux_reg(ARC_REG_MMU_BCR);
|
|
|
mmu->ver = (tmp >> 24);
|
|
|
|
|
@@ -592,13 +627,21 @@ void read_decode_mmu_bcr(void)
|
|
|
mmu->ways = 1 << mmu2->ways;
|
|
|
mmu->u_dtlb = mmu2->u_dtlb;
|
|
|
mmu->u_itlb = mmu2->u_itlb;
|
|
|
- } else {
|
|
|
+ } else if (mmu->ver == 3) {
|
|
|
mmu3 = (struct bcr_mmu_3 *)&tmp;
|
|
|
mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
|
|
|
mmu->sets = 1 << mmu3->sets;
|
|
|
mmu->ways = 1 << mmu3->ways;
|
|
|
mmu->u_dtlb = mmu3->u_dtlb;
|
|
|
mmu->u_itlb = mmu3->u_itlb;
|
|
|
+ } else {
|
|
|
+ mmu4 = (struct bcr_mmu_4 *)&tmp;
|
|
|
+ mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
|
|
|
+ mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
|
|
|
+ mmu->sets = 64 << mmu4->n_entry;
|
|
|
+ mmu->ways = mmu4->n_ways * 2;
|
|
|
+ mmu->u_dtlb = mmu4->u_dtlb * 4;
|
|
|
+ mmu->u_itlb = mmu4->u_itlb * 4;
|
|
|
}
|
|
|
|
|
|
mmu->num_tlb = mmu->sets * mmu->ways;
|
|
@@ -608,10 +651,15 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
|
|
|
{
|
|
|
int n = 0;
|
|
|
struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
|
|
|
+ char super_pg[64] = "";
|
|
|
+
|
|
|
+ if (p_mmu->s_pg_sz_m)
|
|
|
+ scnprintf(super_pg, 64, "%dM Super Page%s, ",
|
|
|
+ p_mmu->s_pg_sz_m, " (not used)");
|
|
|
|
|
|
n += scnprintf(buf + n, len - n,
|
|
|
- "MMU [v%x]\t: %dk PAGE, JTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n",
|
|
|
- p_mmu->ver, p_mmu->pg_sz_k,
|
|
|
+ "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n",
|
|
|
+ p_mmu->ver, p_mmu->pg_sz_k, super_pg,
|
|
|
p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
|
|
|
p_mmu->u_dtlb, p_mmu->u_itlb,
|
|
|
IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : "");
|