@@ -358,10 +358,15 @@ int kvm_perf_teardown(void);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
+void __kvm_set_tpidr_el2(u64 tpidr_el2);
+DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 				       unsigned long hyp_stack_ptr,
 				       unsigned long vector_ptr)
 {
+	u64 tpidr_el2;
+
 	/*
 	 * Call initialization code, and switch to the full blown HYP code.
 	 * If the cpucaps haven't been finalized yet, something has gone very
@@ -370,6 +375,16 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	 */
 	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
+
+	/*
+	 * Calculate the raw per-cpu offset without a translation from the
+	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
+	 * so that we can use adr_l to access per-cpu variables in EL2.
+	 */
+	tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
+		- (u64)kvm_ksym_ref(kvm_host_cpu_state);
+
+	kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
 }
 
 static inline void kvm_arch_hardware_unsetup(void) {}
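
The EL2 counterpart of this change is not part of this hunk. For reference, a minimal sketch of what the hyp-side __kvm_set_tpidr_el2 could look like, assuming it does nothing more than record the offset it is handed in the TPIDR_EL2 register:

	/*
	 * Sketch only, not taken from this hunk: stash the per-cpu offset
	 * computed above in TPIDR_EL2 so that EL2 code can later add it to
	 * an adr_l-generated symbol address to reach this CPU's copy of a
	 * per-cpu variable.
	 */
	void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
	{
		asm("msr tpidr_el2, %0" : : "r" (tpidr_el2));
	}

With the offset installed, EL2 code would presumably locate kvm_host_cpu_state for the current CPU by taking adr_l of the symbol (its address in the hyp mapping) and adding the value read back from tpidr_el2 with mrs, which is why the comment above stresses that the offset is calculated without a kernel-to-linear translation.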