@@ -123,6 +123,7 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
  * sync with the documentation of the CPU feature register ABI.
  */
 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
@@ -148,6 +149,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
@@ -190,6 +192,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };

 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
@@ -199,12 +202,12 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 };

 static const struct arm64_ftr_bits ftr_ctr[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),		/* RES1 */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1),	/* DIC */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1),	/* IDC */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0),	/* ERG */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),		/* RES1 */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
 	 * make use of *minLine.
@@ -506,6 +509,9 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
 	reg->user_mask = user_mask;
 }

+extern const struct arm64_cpu_capabilities arm64_errata[];
+static void __init setup_boot_cpu_capabilities(void);
+
 void __init init_cpu_features(struct cpuinfo_arm64 *info)
 {
 	/* Before we start using the tables, make sure it is sorted */
@@ -548,6 +554,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
 		sve_init_vq_map();
 	}
+
+	/*
+	 * Detect and enable early CPU capabilities based on the boot CPU,
+	 * after we have initialised the CPU feature infrastructure.
+	 */
+	setup_boot_cpu_capabilities();
 }

 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -826,11 +838,6 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
 			MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
 }

-static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
-{
-	return is_kernel_in_hyp_mode();
-}
-
 static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
 			   int __unused)
 {
@@ -852,14 +859,30 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
 					ID_AA64PFR0_FP_SHIFT) < 0;
 }

+static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
+			  int __unused)
+{
+	return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT);
+}
+
+static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
+			  int __unused)
+{
+	return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT);
+}
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
-				int __unused)
+				int scope)
 {
+	/* List of CPUs that are not vulnerable and don't need KPTI */
+	static const struct midr_range kpti_safe_list[] = {
+		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+	};
 	char const *str = "command line option";
-	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

 	/*
 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -883,18 +906,15 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		return true;

 	/* Don't force KPTI for CPUs that are not vulnerable */
-	switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
-	case MIDR_CAVIUM_THUNDERX2:
-	case MIDR_BRCM_VULCAN:
+	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
 		return false;
-	}

 	/* Defer to CPU feature registers */
-	return !cpuid_feature_extract_unsigned_field(pfr0,
-						     ID_AA64PFR0_CSV3_SHIFT);
+	return !has_cpuid_feature(entry, scope);
 }

-static int kpti_install_ng_mappings(void *__unused)
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
 	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
@@ -904,7 +924,7 @@ static int kpti_install_ng_mappings(void *__unused)
 	int cpu = smp_processor_id();

 	if (kpti_applied)
-		return 0;
+		return;

 	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);

@@ -915,7 +935,7 @@ static int kpti_install_ng_mappings(void *__unused)
 	if (!cpu)
 		kpti_applied = true;

-	return 0;
+	return;
 }

 static int __init parse_kpti(char *str)
@@ -932,7 +952,78 @@ static int __init parse_kpti(char *str)
 __setup("kpti=", parse_kpti);
 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */

-static int cpu_copy_el2regs(void *__unused)
+#ifdef CONFIG_ARM64_HW_AFDBM
+static inline void __cpu_enable_hw_dbm(void)
+{
+	u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
+
+	write_sysreg(tcr, tcr_el1);
+	isb();
+}
+
+static bool cpu_has_broken_dbm(void)
+{
+	/* List of CPUs which have broken DBM support. */
+	static const struct midr_range cpus[] = {
+#ifdef CONFIG_ARM64_ERRATUM_1024718
+		MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  // A55 r0p0 - r1p0
+#endif
+		{},
+	};
+
+	return is_midr_in_range_list(read_cpuid_id(), cpus);
+}
+
+static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
+{
+	return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
+	       !cpu_has_broken_dbm();
+}
+
+static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
+{
+	if (cpu_can_use_dbm(cap))
+		__cpu_enable_hw_dbm();
+}
+
+static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
+		       int __unused)
+{
+	static bool detected = false;
+	/*
+	 * DBM is a non-conflicting feature. i.e., the kernel can safely
+	 * run a mix of CPUs with and without the feature. So, we
+	 * unconditionally enable the capability to allow any late CPU
+	 * to use the feature. We only enable the control bits on the
+	 * CPU, if it actually supports it.
+	 *
+	 * We have to make sure we print the "feature" detection only
+	 * when at least one CPU actually uses it. So check if this CPU
+	 * can actually use it and print the message exactly once.
+	 *
+	 * This is safe as all CPUs (including secondary CPUs - due to the
+	 * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
+	 * go through the "matches" check exactly once. Also if a CPU
+	 * matches the criteria, it is guaranteed that the CPU will turn
+	 * the DBM on, as the capability is unconditionally enabled.
+	 */
+	if (!detected && cpu_can_use_dbm(cap)) {
+		detected = true;
+		pr_info("detected: Hardware dirty bit management\n");
+	}
+
+	return true;
+}
+
+#endif
+
+#ifdef CONFIG_ARM64_VHE
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+	return is_kernel_in_hyp_mode();
+}
+
+static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
 {
 	/*
 	 * Copy register values that aren't redirected by hardware.
@@ -944,15 +1035,14 @@ static int cpu_copy_el2regs(void *__unused)
 	 */
 	if (!alternatives_applied)
 		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
-
-	return 0;
 }
+#endif

 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
 		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_useable_gicv3_cpuif,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.field_pos = ID_AA64PFR0_GIC_SHIFT,
@@ -963,20 +1053,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "Privileged Access Never",
 		.capability = ARM64_HAS_PAN,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64MMFR1_EL1,
 		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
-		.enable = cpu_enable_pan,
+		.cpu_enable = cpu_enable_pan,
 	},
 #endif /* CONFIG_ARM64_PAN */
 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
 	{
 		.desc = "LSE atomic instructions",
 		.capability = ARM64_HAS_LSE_ATOMICS,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
 		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
@@ -987,14 +1077,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "Software prefetching using PRFM",
 		.capability = ARM64_HAS_NO_HW_PREFETCH,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
 		.matches = has_no_hw_prefetch,
 	},
 #ifdef CONFIG_ARM64_UAO
 	{
 		.desc = "User Access Override",
 		.capability = ARM64_HAS_UAO,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
 		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
@@ -1008,21 +1098,23 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 #ifdef CONFIG_ARM64_PAN
 	{
 		.capability = ARM64_ALT_PAN_NOT_UAO,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = cpufeature_pan_not_uao,
 	},
 #endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_ARM64_VHE
 	{
 		.desc = "Virtualization Host Extensions",
 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
 		.matches = runs_at_el2,
-		.enable = cpu_copy_el2regs,
+		.cpu_enable = cpu_copy_el2regs,
 	},
+#endif /* CONFIG_ARM64_VHE */
 	{
 		.desc = "32-bit EL0 Support",
 		.capability = ARM64_HAS_32BIT_EL0,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
@@ -1032,22 +1124,30 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "Reduced HYP mapping offset",
 		.capability = ARM64_HYP_OFFSET_LOW,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = hyp_offset_low,
 	},
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 	{
 		.desc = "Kernel page table isolation (KPTI)",
 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
+		/*
+		 * The ID feature fields below are used to indicate that
+		 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
+		 * more details.
+		 */
+		.sys_reg = SYS_ID_AA64PFR0_EL1,
+		.field_pos = ID_AA64PFR0_CSV3_SHIFT,
+		.min_field_value = 1,
 		.matches = unmap_kernel_at_el0,
-		.enable = kpti_install_ng_mappings,
+		.cpu_enable = kpti_install_ng_mappings,
 	},
 #endif
 	{
 		/* FP/SIMD is not implemented */
 		.capability = ARM64_HAS_NO_FPSIMD,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.min_field_value = 0,
 		.matches = has_no_fpsimd,
 	},
@@ -1055,7 +1155,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "Data cache clean to Point of Persistence",
 		.capability = ARM64_HAS_DCPOP,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
 		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
@@ -1065,42 +1165,74 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 #ifdef CONFIG_ARM64_SVE
 	{
 		.desc = "Scalable Vector Extension",
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.capability = ARM64_SVE,
-		.def_scope = SCOPE_SYSTEM,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64PFR0_SVE_SHIFT,
 		.min_field_value = ID_AA64PFR0_SVE,
 		.matches = has_cpuid_feature,
-		.enable = sve_kernel_enable,
+		.cpu_enable = sve_kernel_enable,
 	},
 #endif /* CONFIG_ARM64_SVE */
 #ifdef CONFIG_ARM64_RAS_EXTN
 	{
 		.desc = "RAS Extension Support",
 		.capability = ARM64_HAS_RAS_EXTN,
-		.def_scope = SCOPE_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64PFR0_RAS_SHIFT,
 		.min_field_value = ID_AA64PFR0_RAS_V1,
-		.enable = cpu_clear_disr,
+		.cpu_enable = cpu_clear_disr,
 	},
 #endif /* CONFIG_ARM64_RAS_EXTN */
+	{
+		.desc = "Data cache clean to the PoU not required for I/D coherence",
+		.capability = ARM64_HAS_CACHE_IDC,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cache_idc,
+	},
+	{
+		.desc = "Instruction cache invalidation not required for I/D coherence",
+		.capability = ARM64_HAS_CACHE_DIC,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cache_dic,
+	},
+#ifdef CONFIG_ARM64_HW_AFDBM
+	{
+		/*
+		 * Since we turn this on always, we don't want the user to
+		 * think that the feature is available when it may not be.
+		 * So hide the description.
+		 *
+		 * .desc = "Hardware pagetable Dirty Bit Management",
+		 *
+		 */
+		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+		.capability = ARM64_HW_DBM,
+		.sys_reg = SYS_ID_AA64MMFR1_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
+		.min_field_value = 2,
+		.matches = has_hw_dbm,
+		.cpu_enable = cpu_enable_hw_dbm,
+	},
+#endif
 	{},
 };

-#define HWCAP_CAP(reg, field, s, min_value, type, cap)		\
+#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)	\
 	{							\
 		.desc = #cap,					\
-		.def_scope = SCOPE_SYSTEM,			\
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,		\
 		.matches = has_cpuid_feature,			\
 		.sys_reg = reg,					\
 		.field_pos = field,				\
 		.sign = s,					\
 		.min_field_value = min_value,			\
-		.hwcap_type = type,				\
+		.hwcap_type = cap_type,				\
 		.hwcap = cap,					\
 	}

@@ -1118,14 +1250,18 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
+	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
+	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
 #endif
@@ -1193,7 +1329,7 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
 	/* We support emulation of accesses to CPU ID feature registers */
 	elf_hwcap |= HWCAP_CPUID;
 	for (; hwcaps->matches; hwcaps++)
-		if (hwcaps->matches(hwcaps, hwcaps->def_scope))
+		if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
 			cap_set_elf_hwcap(hwcaps);
 }

@@ -1210,17 +1346,19 @@ static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
 		return false;

 	for (caps = cap_array; caps->matches; caps++)
-		if (caps->capability == cap &&
-		    caps->matches(caps, SCOPE_LOCAL_CPU))
-			return true;
+		if (caps->capability == cap)
+			return caps->matches(caps, SCOPE_LOCAL_CPU);
+
 	return false;
 }

-void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
-			     const char *info)
+static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+				      u16 scope_mask, const char *info)
 {
+	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
 	for (; caps->matches; caps++) {
-		if (!caps->matches(caps, caps->def_scope))
+		if (!(caps->type & scope_mask) ||
+		    !caps->matches(caps, cpucap_default_scope(caps)))
 			continue;

 		if (!cpus_have_cap(caps->capability) && caps->desc)
@@ -1229,41 +1367,145 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 	}
 }

+static void update_cpu_capabilities(u16 scope_mask)
+{
+	__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
+	__update_cpu_capabilities(arm64_errata, scope_mask,
+				  "enabling workaround for");
+}
+
+static int __enable_cpu_capability(void *arg)
+{
+	const struct arm64_cpu_capabilities *cap = arg;
+
+	cap->cpu_enable(cap);
+	return 0;
+}
+
 /*
  * Run through the enabled capabilities and enable() it on all active
  * CPUs
  */
-void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+static void __init
+__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+			  u16 scope_mask)
 {
+	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
 	for (; caps->matches; caps++) {
 		unsigned int num = caps->capability;

-		if (!cpus_have_cap(num))
+		if (!(caps->type & scope_mask) || !cpus_have_cap(num))
 			continue;

 		/* Ensure cpus_have_const_cap(num) works */
 		static_branch_enable(&cpu_hwcap_keys[num]);

-		if (caps->enable) {
+		if (caps->cpu_enable) {
 			/*
-			 * Use stop_machine() as it schedules the work allowing
-			 * us to modify PSTATE, instead of on_each_cpu() which
-			 * uses an IPI, giving us a PSTATE that disappears when
-			 * we return.
+			 * Capabilities with SCOPE_BOOT_CPU scope are finalised
+			 * before any secondary CPU boots. Thus, each secondary
+			 * will enable the capability as appropriate via
+			 * check_local_cpu_capabilities(). The only exception is
+			 * the boot CPU, for which the capability must be
+			 * enabled here. This approach avoids costly
+			 * stop_machine() calls for this case.
+			 *
+			 * Otherwise, use stop_machine() as it schedules the
+			 * work allowing us to modify PSTATE, instead of
+			 * on_each_cpu() which uses an IPI, giving us a PSTATE
+			 * that disappears when we return.
 			 */
-			stop_machine(caps->enable, (void *)caps, cpu_online_mask);
+			if (scope_mask & SCOPE_BOOT_CPU)
+				caps->cpu_enable(caps);
+			else
+				stop_machine(__enable_cpu_capability,
+					     (void *)caps, cpu_online_mask);
 		}
 	}
 }

+static void __init enable_cpu_capabilities(u16 scope_mask)
+{
+	__enable_cpu_capabilities(arm64_features, scope_mask);
+	__enable_cpu_capabilities(arm64_errata, scope_mask);
+}
+
+/*
+ * Run through the list of capabilities to check for conflicts.
+ * If the system has already detected a capability, take necessary
+ * action on this CPU.
+ *
+ * Returns "false" on conflicts.
+ */
+static bool
+__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
+			u16 scope_mask)
+{
+	bool cpu_has_cap, system_has_cap;
+
+	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
+
+	for (; caps->matches; caps++) {
+		if (!(caps->type & scope_mask))
+			continue;
+
+		cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
+		system_has_cap = cpus_have_cap(caps->capability);
+
+		if (system_has_cap) {
+			/*
+			 * Check if the new CPU misses an advertised feature,
+			 * which is not safe to miss.
+			 */
+			if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
+				break;
+			/*
+			 * We have to issue cpu_enable() irrespective of
+			 * whether the CPU has it or not, as it is enabled
+			 * system wide. It is up to the callback to take
+			 * appropriate action on this CPU.
+			 */
+			if (caps->cpu_enable)
+				caps->cpu_enable(caps);
+		} else {
+			/*
+			 * Check if the CPU has this capability if it isn't
+			 * safe to have when the system doesn't.
+			 */
+			if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
+				break;
+		}
+	}
+
+	if (caps->matches) {
+		pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
+			smp_processor_id(), caps->capability,
+			caps->desc, system_has_cap, cpu_has_cap);
+		return false;
+	}
+
+	return true;
+}
+
+static bool verify_local_cpu_caps(u16 scope_mask)
+{
+	return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
+	       __verify_local_cpu_caps(arm64_features, scope_mask);
+}
+
 /*
  * Check for CPU features that are used in early boot
  * based on the Boot CPU value.
  */
 static void check_early_cpu_features(void)
 {
-	verify_cpu_run_el();
 	verify_cpu_asid_bits();
+
+	/*
+	 * Early features are used by the kernel already. If there
+	 * is a conflict, we cannot proceed further.
+	 */
+	if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
+		cpu_panic_kernel();
 }

 static void
@@ -1278,27 +1520,6 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
 		}
 }

-static void
-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
-{
-	const struct arm64_cpu_capabilities *caps = caps_list;
-	for (; caps->matches; caps++) {
-		if (!cpus_have_cap(caps->capability))
-			continue;
-		/*
-		 * If the new CPU misses an advertised feature, we cannot proceed
-		 * further, park the cpu.
-		 */
-		if (!__this_cpu_has_cap(caps_list, caps->capability)) {
-			pr_crit("CPU%d: missing feature: %s\n",
-				smp_processor_id(), caps->desc);
-			cpu_die_early();
-		}
-		if (caps->enable)
-			caps->enable((void *)caps);
-	}
-}
-
 static void verify_sve_features(void)
 {
 	u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
@@ -1316,6 +1537,7 @@ static void verify_sve_features(void)
 	/* Add checks on other ZCR bits here if necessary */
 }

+
 /*
  * Run through the enabled system capabilities and enable() it on this CPU.
  * The capabilities were decided based on the available CPUs at the boot time.
@@ -1326,8 +1548,14 @@ static void verify_sve_features(void)
  */
 static void verify_local_cpu_capabilities(void)
 {
-	verify_local_cpu_errata_workarounds();
-	verify_local_cpu_features(arm64_features);
+	/*
+	 * The capabilities with SCOPE_BOOT_CPU are checked from
+	 * check_early_cpu_features(), as they need to be verified
+	 * on all secondary CPUs.
+	 */
+	if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
+		cpu_die_early();
+
 	verify_local_elf_hwcaps(arm64_elf_hwcaps);

 	if (system_supports_32bit_el0())
@@ -1335,9 +1563,6 @@ static void verify_local_cpu_capabilities(void)

 	if (system_supports_sve())
 		verify_sve_features();
-
-	if (system_uses_ttbr0_pan())
-		pr_info("Emulating Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
 }

 void check_local_cpu_capabilities(void)
@@ -1350,20 +1575,22 @@ void check_local_cpu_capabilities(void)

 	/*
 	 * If we haven't finalised the system capabilities, this CPU gets
-	 * a chance to update the errata work arounds.
+	 * a chance to update the errata work arounds and local features.
 	 * Otherwise, this CPU should verify that it has all the system
 	 * advertised capabilities.
 	 */
 	if (!sys_caps_initialised)
-		update_cpu_errata_workarounds();
+		update_cpu_capabilities(SCOPE_LOCAL_CPU);
 	else
 		verify_local_cpu_capabilities();
 }

-static void __init setup_feature_capabilities(void)
+static void __init setup_boot_cpu_capabilities(void)
 {
-	update_cpu_capabilities(arm64_features, "detected feature:");
-	enable_cpu_capabilities(arm64_features);
+	/* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
+	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
+	/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
+	enable_cpu_capabilities(SCOPE_BOOT_CPU);
 }

 DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
@@ -1382,20 +1609,33 @@ bool this_cpu_has_cap(unsigned int cap)
 		__this_cpu_has_cap(arm64_errata, cap));
 }

+static void __init setup_system_capabilities(void)
+{
+	/*
+	 * We have finalised the system-wide safe feature
+	 * registers, finalise the capabilities that depend
+	 * on it. Also enable all the available capabilities,
+	 * that are not enabled already.
+	 */
+	update_cpu_capabilities(SCOPE_SYSTEM);
+	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+}
+
 void __init setup_cpu_features(void)
 {
 	u32 cwg;
 	int cls;

-	/* Set the CPU feature capabilies */
-	setup_feature_capabilities();
-	enable_errata_workarounds();
+	setup_system_capabilities();
 	mark_const_caps_ready();
 	setup_elf_hwcaps(arm64_elf_hwcaps);

 	if (system_supports_32bit_el0())
 		setup_elf_hwcaps(compat_elf_hwcaps);

+	if (system_uses_ttbr0_pan())
+		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+
 	sve_setup();

 	/* Advertise that we have computed the system capabilities */
@@ -1518,10 +1758,8 @@ static int __init enable_mrs_emulation(void)

 core_initcall(enable_mrs_emulation);

-int cpu_clear_disr(void *__unused)
+void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 {
 	/* Firmware may have left a deferred SError in this register. */
 	write_sysreg_s(0, SYS_DISR_EL1);
-
-	return 0;
 }