|
@@ -421,6 +421,36 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
|
|
|
del_evtchn(u, evtchn);
|
|
|
}
|
|
|
|
|
|
/*
 * Per-CPU cursor remembering the CPU picked by the most recent interdomain
 * bind issued from this CPU; used to round-robin successive bindings across
 * CPUs rather than piling them all onto the binding CPU.
 */
static DEFINE_PER_CPU(int, bind_last_selected_cpu);

/*
 * Spread a newly bound interdomain event channel onto the next online CPU
 * allowed by the IRQ's affinity mask, advancing the per-CPU round-robin
 * cursor. Silently does nothing if the event channel's IRQ has no
 * irq_desc.
 *
 * @evtchn: the just-bound interdomain event channel port.
 */
static void evtchn_bind_interdom_next_vcpu(int evtchn)
{
	unsigned int selected_cpu, irq;
	struct irq_desc *desc;
	unsigned long flags;

	irq = irq_from_evtchn(evtchn);
	desc = irq_to_desc(irq);

	if (!desc)
		return;

	/*
	 * desc->lock held with IRQs off for the select-and-rebind sequence;
	 * presumably this also satisfies the disabled-irqs expectation of
	 * the rebind/unmask path noted below.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Advance the cursor to the next CPU that is both online and in the
	 * IRQ's affinity mask; wrap to the first such CPU when the search
	 * runs past nr_cpu_ids.
	 */
	selected_cpu = this_cpu_read(bind_last_selected_cpu);
	selected_cpu = cpumask_next_and(selected_cpu,
			desc->irq_common_data.affinity, cpu_online_mask);

	if (unlikely(selected_cpu >= nr_cpu_ids))
		selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
				cpu_online_mask);

	this_cpu_write(bind_last_selected_cpu, selected_cpu);

	/* unmask expects irqs to be disabled */
	xen_rebind_evtchn_to_cpu(evtchn, selected_cpu);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
|
|
|
+
|
|
|
static long evtchn_ioctl(struct file *file,
|
|
|
unsigned int cmd, unsigned long arg)
|
|
|
{
|
|
@@ -478,8 +508,10 @@ static long evtchn_ioctl(struct file *file,
|
|
|
break;
|
|
|
|
|
|
rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
|
|
|
- if (rc == 0)
|
|
|
+ if (rc == 0) {
|
|
|
rc = bind_interdomain.local_port;
|
|
|
+ evtchn_bind_interdom_next_vcpu(rc);
|
|
|
+ }
|
|
|
break;
|
|
|
}
|
|
|
|