@@ -594,6 +594,7 @@ retry:
 
 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
+		bool target_vcpu_needs_kick = false;
 
 		spin_lock(&irq->irq_lock);
 
@@ -664,11 +665,18 @@ retry:
 			list_del(&irq->ap_list);
 			irq->vcpu = target_vcpu;
 			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
+			target_vcpu_needs_kick = true;
 		}
 
 		spin_unlock(&irq->irq_lock);
 		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
 		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+
+		if (target_vcpu_needs_kick) {
+			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
+			kvm_vcpu_kick(target_vcpu);
+		}
+
 		goto retry;
 	}
 