|
@@ -40,6 +40,15 @@
|
|
|
/* L1 cache line geometry: 64-byte lines (1 << 6). */
#define L1_CACHE_SHIFT		(6)
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
/*
 * CLIDR_EL1 "Level of ..." fields; each is 3 bits wide (hence the 0x7
 * mask below):
 *   LoUU  - bits [29:27], Level of Unification Uniprocessor
 *   LoC   - bits [26:24], Level of Coherence
 *   LoUIS - bits [23:21], Level of Unification Inner Shareable
 * (Field positions per the ARM ARM CLIDR_EL1 description.)
 */
#define CLIDR_LOUU_SHIFT	27
#define CLIDR_LOC_SHIFT		24
#define CLIDR_LOUIS_SHIFT	21

/* Extract a 3-bit level field from a CLIDR_EL1 value. */
#define CLIDR_LOUU(clidr)	(((clidr) >> CLIDR_LOUU_SHIFT) & 0x7)
#define CLIDR_LOC(clidr)	(((clidr) >> CLIDR_LOC_SHIFT) & 0x7)
#define CLIDR_LOUIS(clidr)	(((clidr) >> CLIDR_LOUIS_SHIFT) & 0x7)
/*
|
|
|
* Memory returned by kmalloc() may be used for DMA, so we must make
|
|
|
* sure that all such allocations are cache aligned. Otherwise,
|
|
@@ -84,6 +93,37 @@ static inline int cache_line_size(void)
|
|
|
return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
|
|
|
}
|
|
|
|
|
|
/*
 * Read the effective value of CTR_EL0.
 *
 * According to ARM ARM for ARMv8-A (ARM DDI 0487C.a),
 * section D10.2.33 "CTR_EL0, Cache Type Register" :
 *
 * CTR_EL0.IDC reports the data cache clean requirements for
 * instruction to data coherence.
 *
 *  0 - dcache clean to PoU is required unless :
 *     (CLIDR_EL1.LoC == 0) || (CLIDR_EL1.LoUIS == 0 && CLIDR_EL1.LoUU == 0)
 *  1 - dcache clean to PoU is not required for i-to-d coherence.
 *
 * This routine provides the CTR_EL0 with the IDC field updated to the
 * effective state.
 */
static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
{
	u32 ctr = read_cpuid_cachetype();

	if (!(ctr & BIT(CTR_IDC_SHIFT))) {
		u64 clidr = read_sysreg(clidr_el1);

		/*
		 * Hardware reports IDC == 0, but if CLIDR_EL1 shows no
		 * cache level that would require a clean to PoU, IDC is
		 * effectively 1 — reflect that in the returned value.
		 */
		if (CLIDR_LOC(clidr) == 0 ||
		    (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0))
			ctr |= BIT(CTR_IDC_SHIFT);
	}

	return ctr;
}
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
|
|
#endif
|