@@ -360,31 +360,91 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
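
For context, cpuid_feature_extract_unsigned_field() reads a 4-bit field out of an ID register value, and a VMIDBits field value of 2 means the CPU implements 16-bit VMIDs. A minimal user-space sketch of that extraction, with a made-up register value (this is not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the field extraction used above, assuming 4-bit ID
 * register fields; simplified, not the kernel's actual helper.
 */
static unsigned int extract_unsigned_field(uint64_t reg, int shift)
{
	return (unsigned int)((reg >> shift) & 0xf);
}

int main(void)
{
	uint64_t id_aa64mmfr1 = 0x20;	/* made-up value: bits [7:4] = 2 */
	int vmidbits_shift = 4;		/* ID_AA64MMFR1_VMIDBITS_SHIFT upstream */

	unsigned int vmidbits = extract_unsigned_field(id_aa64mmfr1, vmidbits_shift);
	printf("VMID width: %u bits\n", vmidbits == 2 ? 16 : 8);
	return 0;
}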
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+/*
+ * EL2 vectors can be mapped and rerouted in a number of ways,
+ * depending on the kernel configuration and CPU present:
+ *
+ * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
+ *   hardening sequence is placed in one of the vector slots, which is
+ *   executed before jumping to the real vectors.
+ *
+ * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
+ *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
+ *   hardening sequence is mapped next to the idmap page, and executed
+ *   before jumping to the real vectors.
+ *
+ * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
+ *   empty slot is selected, mapped next to the idmap page, and
+ *   executed before jumping to the real vectors.
+ *
+ * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
+ * VHE, as we don't have hypervisor-specific mappings. If the system
+ * is VHE and yet selects this capability, it will be ignored.
+ */
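
The vector slots mentioned above are consecutive 2KB copies of the vector code within the __bp_harden_hyp_vecs pages, so selecting a slot is just an offset from a base address. A quick stand-alone sketch of that addressing, with an invented base address and slot count:

#include <stdint.h>
#include <stdio.h>

#define SZ_2K		0x800	/* one vector slot, matching the kernel's SZ_2K */
#define NR_SLOTS	4	/* illustrative count; the kernel uses BP_HARDEN_EL2_SLOTS */

/* Mirrors the "vect += slot * SZ_2K" addressing in kvm_get_hyp_vector() below. */
static uintptr_t slot_addr(uintptr_t vecs_base, int slot)
{
	return vecs_base + (uintptr_t)slot * SZ_2K;
}

int main(void)
{
	uintptr_t base = 0x40000000UL;	/* made-up base address of the vectors */

	for (int slot = 0; slot < NR_SLOTS; slot++)
		printf("slot %d -> %#lx\n", slot, (unsigned long)slot_addr(base, slot));
	return 0;
}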
 #include <asm/mmu.h>
 
+extern void *__kvm_bp_vect_base;
+extern int __kvm_harden_el2_vector_slot;
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
-	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+	int slot = -1;
 
-	if (data->fn) {
-		vect = __bp_harden_hyp_vecs_start +
-		       data->hyp_vectors_slot * SZ_2K;
+	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
+		slot = data->hyp_vectors_slot;
+	}
 
-		if (!has_vhe())
-			vect = lm_alias(vect);
+	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
+		vect = __kvm_bp_vect_base;
+		if (slot == -1)
+			slot = __kvm_harden_el2_vector_slot;
 	}
 
-	vect = kern_hyp_va(vect);
+	if (slot != -1)
+		vect += slot * SZ_2K;
+
 	return vect;
}
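
To summarise the function above: BP hardening selects both a base and a slot, while EL2 hardening overrides the base and only supplies a slot when none was already picked. A hedged stand-alone model of that decision, ignoring the has_vhe() check and using invented names and addresses (not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_2K 0x800

/* Invented stand-ins for the kernel state consulted by kvm_get_hyp_vector(). */
struct vect_model {
	uintptr_t direct_vectors;	/* plays the role of __kvm_hyp_vector */
	uintptr_t hardened_vectors;	/* __bp_harden_hyp_vecs_start */
	uintptr_t exec_mapping;		/* __kvm_bp_vect_base */
	int bp_slot;			/* data->hyp_vectors_slot */
	int el2_slot;			/* __kvm_harden_el2_vector_slot */
};

static uintptr_t pick_vectors(const struct vect_model *m, bool hbp, bool hel2)
{
	uintptr_t vect = m->direct_vectors;
	int slot = -1;

	if (hbp) {			/* BP hardening: hardened copy plus its slot */
		vect = m->hardened_vectors;
		slot = m->bp_slot;
	}
	if (hel2) {			/* EL2 hardening: switch to the exec mapping */
		vect = m->exec_mapping;
		if (slot == -1)		/* reuse the BP slot if one was chosen */
			slot = m->el2_slot;
	}
	if (slot != -1)
		vect += (uintptr_t)slot * SZ_2K;
	return vect;
}

int main(void)
{
	struct vect_model m = {
		.direct_vectors = 0x1000, .hardened_vectors = 0x2000,
		.exec_mapping = 0x3000, .bp_slot = 1, .el2_slot = 3,
	};

	printf("HBP only   -> %#lx\n", (unsigned long)pick_vectors(&m, true, false));
	printf("HEL2 only  -> %#lx\n", (unsigned long)pick_vectors(&m, false, true));
	printf("HBP + HEL2 -> %#lx\n", (unsigned long)pick_vectors(&m, true, true));
	return 0;
}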
 
+/* This is only called on a !VHE system */
 static inline int kvm_map_vectors(void)
 {
+	/*
+	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
+	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
+	 *
+	 * !HBP + !HEL2 -> use direct vectors
+	 *  HBP + !HEL2 -> use hardened vectors in place
+	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
+	 *  HBP +  HEL2 -> use hardened vectors and use exec mapping
+	 */
+	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
+		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+	}
+
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
+		unsigned long size = (__bp_harden_hyp_vecs_end -
+				      __bp_harden_hyp_vecs_start);
+
+		/*
+		 * Always allocate a spare vector slot, as we don't
+		 * know yet which CPUs have a BP hardening slot that
+		 * we can reuse.
+		 */
+		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+		return create_hyp_exec_mappings(vect_pa, size,
+						&__kvm_bp_vect_base);
+	}
+
 	return 0;
 }
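
The HBP/HEL2 table at the top of kvm_map_vectors() can be checked mechanically; here is a small sketch that enumerates the four cases (pure illustration, no kernel interfaces involved):

#include <stdbool.h>
#include <stdio.h>

/* Enumerates the four cases from the comment in kvm_map_vectors(). */
static const char *vector_strategy(bool hbp, bool hel2)
{
	if (!hbp && !hel2)
		return "use direct vectors";
	if (hbp && !hel2)
		return "use hardened vectors in place";
	if (!hbp && hel2)
		return "allocate one vector slot and use exec mapping";
	return "use hardened vectors and use exec mapping";
}

int main(void)
{
	for (int hbp = 0; hbp <= 1; hbp++)
		for (int hel2 = 0; hel2 <= 1; hel2++)
			printf("HBP=%d HEL2=%d -> %s\n", hbp, hel2,
			       vector_strategy(hbp, hel2));
	return 0;
}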
-
 #else
 static inline void *kvm_get_hyp_vector(void)
 {