|
@@ -35,6 +35,7 @@
|
|
*/
|
|
*/
|
|
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
|
|
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
|
|
static struct cpuinfo_arm64 boot_cpu_data;
|
|
static struct cpuinfo_arm64 boot_cpu_data;
|
|
|
|
/*
 * System-wide summary of mixed-endian EL0 support. Starts optimistic (true)
 * and is ANDed down per CPU in update_mixed_endian_el0_support(), so a single
 * incapable CPU clears it permanently.
 */
static bool mixed_endian_el0 = true;
|
|
|
|
|
|
static char *icache_policy_str[] = {
|
|
static char *icache_policy_str[] = {
|
|
[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
|
|
[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
|
|
@@ -68,6 +69,26 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
|
|
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
|
|
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
|
|
}
|
|
}
|
|
|
|
|
|
|
|
/*
 * cpu_supports_mixed_endian_el0 - query the *current* CPU only.
 *
 * Reads ID_AA64MMFR0_EL1 on the calling CPU and decodes its mixed-endian
 * EL0 field. Contrast with system_supports_mixed_endian_el0(), which
 * reports the cached system-wide answer across all CPUs seen so far.
 */
bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
|
|
|
|
+
|
|
|
|
/*
 * system_supports_mixed_endian_el0 - system-wide mixed-endian EL0 capability.
 *
 * Returns the cached flag accumulated over every CPU that has gone through
 * update_cpu_features(); true only if all of them support mixed-endian EL0.
 */
bool system_supports_mixed_endian_el0(void)
{
	return mixed_endian_el0;
}
|
|
|
|
+
|
|
|
|
/*
 * Fold one CPU's mixed-endian EL0 capability (from its saved
 * ID_AA64MMFR0_EL1 copy in @info) into the system-wide flag.
 * The &= means the flag can only ever go from true to false.
 */
static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info)
{
	mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0);
}
|
|
|
|
+
|
|
|
|
/*
 * Central hook for per-CPU feature accounting, called as each CPU's
 * register state is recorded. Currently only tracks mixed-endian EL0;
 * additional feature updates belong here.
 */
static void update_cpu_features(struct cpuinfo_arm64 *info)
{
	update_mixed_endian_el0_support(info);
}
|
|
|
|
+
|
|
static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
|
|
static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
|
|
{
|
|
{
|
|
if ((boot & mask) == (cur & mask))
|
|
if ((boot & mask) == (cur & mask))
|
|
@@ -215,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
|
|
cpuinfo_detect_icache_policy(info);
|
|
cpuinfo_detect_icache_policy(info);
|
|
|
|
|
|
check_local_cpu_errata();
|
|
check_local_cpu_errata();
|
|
|
|
+ update_cpu_features(info);
|
|
}
|
|
}
|
|
|
|
|
|
void cpuinfo_store_cpu(void)
|
|
void cpuinfo_store_cpu(void)
|