@@ -19,6 +19,7 @@
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
 #include <linux/types.h>
+#include <linux/cpu.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
@@ -87,7 +88,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
 
 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -109,9 +109,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 }
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
-				      const char *hyp_vecs_start,
-				      const char *hyp_vecs_end)
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+				    const char *hyp_vecs_start,
+				    const char *hyp_vecs_end)
 {
 	static DEFINE_SPINLOCK(bp_lock);
 	int cpu, slot = -1;
@@ -138,7 +138,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 #define __smccc_workaround_1_smc_start		NULL
 #define __smccc_workaround_1_smc_end		NULL
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 				      const char *hyp_vecs_start,
 				      const char *hyp_vecs_end)
 {
@@ -146,23 +146,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 }
 #endif /* CONFIG_KVM_INDIRECT_VECTORS */
 
-static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
-				    bp_hardening_cb_t fn,
-				    const char *hyp_vecs_start,
-				    const char *hyp_vecs_end)
-{
-	u64 pfr0;
-
-	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-		return;
-
-	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
-		return;
-
-	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
-}
-
 #include <uapi/linux/psci.h>
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
@@ -189,60 +172,83 @@ static void qcom_link_stack_sanitization(void)
 		     : "=&r" (tmp));
 }
 
-static void
-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+static bool __nospectre_v2;
+static int __init parse_nospectre_v2(char *str)
+{
+	__nospectre_v2 = true;
+	return 0;
+}
+early_param("nospectre_v2", parse_nospectre_v2);
+
+/*
+ * -1: No workaround
+ *  0: No workaround required
+ *  1: Workaround installed
+ */
+static int detect_harden_bp_fw(void)
 {
 	bp_hardening_cb_t cb;
 	void *smccc_start, *smccc_end;
 	struct arm_smccc_res res;
 	u32 midr = read_cpuid_id();
 
-	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-		return;
-
 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
-		return;
+		return -1;
 
 	switch (psci_ops.conduit) {
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if ((int)res.a0 < 0)
-			return;
-		cb = call_hvc_arch_workaround_1;
-		/* This is a guest, no need to patch KVM vectors */
-		smccc_start = NULL;
-		smccc_end = NULL;
+		switch ((int)res.a0) {
+		case 1:
+			/* Firmware says we're just fine */
+			return 0;
+		case 0:
+			cb = call_hvc_arch_workaround_1;
+			/* This is a guest, no need to patch KVM vectors */
+			smccc_start = NULL;
+			smccc_end = NULL;
+			break;
+		default:
+			return -1;
+		}
 		break;
 
 	case PSCI_CONDUIT_SMC:
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if ((int)res.a0 < 0)
-			return;
-		cb = call_smc_arch_workaround_1;
-		smccc_start = __smccc_workaround_1_smc_start;
-		smccc_end = __smccc_workaround_1_smc_end;
+		switch ((int)res.a0) {
+		case 1:
+			/* Firmware says we're just fine */
+			return 0;
+		case 0:
+			cb = call_smc_arch_workaround_1;
+			smccc_start = __smccc_workaround_1_smc_start;
+			smccc_end = __smccc_workaround_1_smc_end;
+			break;
+		default:
+			return -1;
+		}
 		break;
 
 	default:
-		return;
+		return -1;
 	}
 
 	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
 	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;
 
-	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
+		install_bp_hardening_cb(cb, smccc_start, smccc_end);
 
-	return;
+	return 1;
 }
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
-#ifdef CONFIG_ARM64_SSBD
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
 
 static const struct ssbd_options {
 	const char	*str;
@@ -312,6 +318,19 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 
 void arm64_set_ssbd_mitigation(bool state)
 {
+	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+		pr_info_once("SSBD disabled by kernel configuration\n");
+		return;
+	}
+
+	if (this_cpu_has_cap(ARM64_SSBS)) {
+		if (state)
+			asm volatile(SET_PSTATE_SSBS(0));
+		else
+			asm volatile(SET_PSTATE_SSBS(1));
+		return;
+	}
+
 	switch (psci_ops.conduit) {
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
@@ -333,11 +352,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 	struct arm_smccc_res res;
 	bool required = true;
 	s32 val;
+	bool this_cpu_safe = false;
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
+	if (cpu_mitigations_off())
+		ssbd_state = ARM64_SSBD_FORCE_DISABLE;
+
+	/* delay setting __ssb_safe until we get a firmware response */
+	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+		this_cpu_safe = true;
+
+	if (this_cpu_has_cap(ARM64_SSBS)) {
+		if (!this_cpu_safe)
+			__ssb_safe = false;
+		required = false;
+		goto out_printmsg;
+	}
+
 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -354,6 +390,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	default:
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -362,14 +400,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 	switch (val) {
 	case SMCCC_RET_NOT_SUPPORTED:
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 
+	/* machines with mixed mitigation requirements must not return this */
 	case SMCCC_RET_NOT_REQUIRED:
 		pr_info_once("%s mitigation not required\n", entry->desc);
 		ssbd_state = ARM64_SSBD_MITIGATED;
 		return false;
 
 	case SMCCC_RET_SUCCESS:
+		__ssb_safe = false;
 		required = true;
 		break;
 
@@ -379,12 +421,13 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	default:
 		WARN_ON(1);
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
 	switch (ssbd_state) {
 	case ARM64_SSBD_FORCE_DISABLE:
-		pr_info_once("%s disabled from command-line\n", entry->desc);
 		arm64_set_ssbd_mitigation(false);
 		required = false;
 		break;
@@ -397,7 +440,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 		break;
 
 	case ARM64_SSBD_FORCE_ENABLE:
-		pr_info_once("%s forced from command-line\n", entry->desc);
 		arm64_set_ssbd_mitigation(true);
 		required = true;
 		break;
@@ -407,9 +449,27 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 		break;
 	}
 
+out_printmsg:
+	switch (ssbd_state) {
+	case ARM64_SSBD_FORCE_DISABLE:
+		pr_info_once("%s disabled from command-line\n", entry->desc);
+		break;
+
+	case ARM64_SSBD_FORCE_ENABLE:
+		pr_info_once("%s forced from command-line\n", entry->desc);
+		break;
+	}
+
 	return required;
 }
-#endif /* CONFIG_ARM64_SSBD */
+
+/* known invulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+	{},
+};
 
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -464,6 +524,10 @@ has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
 	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 	CAP_MIDR_RANGE_LIST(midr_list)
 
+/* Track overall mitigation state. We are only mitigated if all cores are ok */
+static bool __hardenbp_enab = true;
+static bool __spectrev2_safe = true;
+
 /*
  * Generic helper for handling capabilties with multiple (match,enable) pairs
  * of call backs, sharing the same capability bit.
@@ -496,26 +560,63 @@ multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
 		caps->cpu_enable(caps);
 }
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-
 /*
- * List of CPUs where we need to issue a psci call to
- * harden the branch predictor.
+ * List of CPUs that do not need any Spectre-v2 mitigation at all.
  */
-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
-	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
-	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
-	{},
+static const struct midr_range spectre_v2_safe_list[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+	{ /* sentinel */ }
 };
 
-#endif
+/*
+ * Track overall bp hardening for all heterogeneous cores in the machine.
+ * We are only considered "safe" if all booted cores are known safe.
+ */
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	int need_wa;
+
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+	/* If the CPU has CSV2 set, we're safe */
+	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
+						 ID_AA64PFR0_CSV2_SHIFT))
+		return false;
+
+	/* Alternatively, we have a list of unaffected CPUs */
+	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+		return false;
+
+	/* Fallback to firmware detection */
+	need_wa = detect_harden_bp_fw();
+	if (!need_wa)
+		return false;
+
+	__spectrev2_safe = false;
+
+	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
+		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
+		__hardenbp_enab = false;
+		return false;
+	}
+
+	/* forced off */
+	if (__nospectre_v2 || cpu_mitigations_off()) {
+		pr_info_once("spectrev2 mitigation disabled by command line option\n");
+		__hardenbp_enab = false;
+		return false;
+	}
+
+	if (need_wa < 0) {
+		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
+		__hardenbp_enab = false;
+	}
+
+	return (need_wa > 0);
+}
 
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
@@ -674,13 +775,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 	},
 #endif
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-		.cpu_enable = enable_smccc_arch_workaround_1,
-		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = check_branch_predictor,
 	},
-#endif
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 	{
 		.desc = "EL2 vector hardening",
@@ -688,14 +787,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
 	},
 #endif
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypass Disable",
 		.capability = ARM64_SSBD,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_ssbd_mitigation,
+		.midr_range_list = arm64_ssb_cpus,
 	},
-#endif
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 	{
 		.desc = "ARM erratum 1463225",
@@ -707,3 +805,38 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	{
 	}
 };
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	if (__spectrev2_safe)
+		return sprintf(buf, "Not affected\n");
+
+	if (__hardenbp_enab)
+		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+
+	return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	if (__ssb_safe)
+		return sprintf(buf, "Not affected\n");
+
+	switch (ssbd_state) {
+	case ARM64_SSBD_KERNEL:
+	case ARM64_SSBD_FORCE_ENABLE:
+		if (IS_ENABLED(CONFIG_ARM64_SSBD))
+			return sprintf(buf,
+			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
+	}
+
+	return sprintf(buf, "Vulnerable\n");
+}