@@ -74,6 +74,8 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 	__this_cpu_write(kvm_arm_running_vcpu, vcpu);
 }
 
+DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+
 /**
  * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
  * Must be called from non-preemptible context
@@ -302,6 +304,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		static_branch_dec(&userspace_irqchip_in_use);
 	kvm_arch_vcpu_free(vcpu);
 }
 
@@ -522,14 +526,22 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.has_run_once = true;
 
-	/*
-	 * Map the VGIC hardware resources before running a vcpu the first
-	 * time on this VM.
-	 */
-	if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
-		ret = kvm_vgic_map_resources(kvm);
-		if (ret)
-			return ret;
+	if (likely(irqchip_in_kernel(kvm))) {
+		/*
+		 * Map the VGIC hardware resources before running a vcpu the
+		 * first time on this VM.
+		 */
+		if (unlikely(!vgic_ready(kvm))) {
+			ret = kvm_vgic_map_resources(kvm);
+			if (ret)
+				return ret;
+		}
+	} else {
+		/*
+		 * Tell the rest of the code that there are userspace irqchip
+		 * VMs in the wild.
+		 */
+		static_branch_inc(&userspace_irqchip_in_use);
 	}
 
 	ret = kvm_timer_enable(vcpu);
@@ -664,18 +676,29 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		kvm_vgic_flush_hwstate(vcpu);
 
 		/*
-		 * If we have a singal pending, or need to notify a userspace
-		 * irqchip about timer or PMU level changes, then we exit (and
-		 * update the timer level state in kvm_timer_update_run
-		 * below).
+		 * Exit if we have a signal pending so that we can deliver the
+		 * signal to user space.
 		 */
-		if (signal_pending(current) ||
-		    kvm_timer_should_notify_user(vcpu) ||
-		    kvm_pmu_should_notify_user(vcpu)) {
+		if (signal_pending(current)) {
 			ret = -EINTR;
 			run->exit_reason = KVM_EXIT_INTR;
 		}
 
+		/*
+		 * If we're using a userspace irqchip, then check if we need
+		 * to tell a userspace irqchip about timer or PMU level
+		 * changes and if so, exit to userspace (the actual level
+		 * state gets updated in kvm_timer_update_run and
+		 * kvm_pmu_update_run below).
+		 */
+		if (static_branch_unlikely(&userspace_irqchip_in_use)) {
+			if (kvm_timer_should_notify_user(vcpu) ||
+			    kvm_pmu_should_notify_user(vcpu)) {
+				ret = -EINTR;
+				run->exit_reason = KVM_EXIT_INTR;
+			}
+		}
+
 		/*
 		 * Ensure we set mode to IN_GUEST_MODE after we disable
 		 * interrupts and before the final VCPU requests check.
@@ -688,7 +711,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		    kvm_request_pending(vcpu)) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			kvm_pmu_sync_hwstate(vcpu);
-			kvm_timer_sync_hwstate(vcpu);
+			if (static_branch_unlikely(&userspace_irqchip_in_use))
+				kvm_timer_sync_hwstate(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
 			local_irq_enable();
 			preempt_enable();
@@ -732,7 +756,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * we don't want vtimer interrupts to race with syncing the
 		 * timer virtual interrupt state.
 		 */
-		kvm_timer_sync_hwstate(vcpu);
+		if (static_branch_unlikely(&userspace_irqchip_in_use))
+			kvm_timer_sync_hwstate(vcpu);
 
 		/*
 		 * We may have taken a host interrupt in HYP mode (ie