@@ -204,6 +204,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	return ret;
 }
 
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline int irq_set_affinity_pending(struct irq_data *data,
+					   const struct cpumask *dest)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+
+	irqd_set_move_pending(data);
+	irq_copy_pending(desc, dest);
+	return 0;
+}
+#else
+static inline int irq_set_affinity_pending(struct irq_data *data,
+					   const struct cpumask *dest)
+{
+	return -EBUSY;
+}
+#endif
+
+static int irq_try_set_affinity(struct irq_data *data,
+				const struct cpumask *dest, bool force)
+{
+	int ret = irq_do_set_affinity(data, dest, force);
+
+	/*
+	 * In case that the underlying vector management is busy and the
+	 * architecture supports the generic pending mechanism then utilize
+	 * this to avoid returning an error to user space.
+	 */
+	if (ret == -EBUSY && !force)
+		ret = irq_set_affinity_pending(data, dest);
+	return ret;
+}
+
 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 			    bool force)
 {
@@ -214,8 +247,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
-	if (irq_can_move_pcntxt(data)) {
-		ret = irq_do_set_affinity(data, mask, force);
+	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
+		ret = irq_try_set_affinity(data, mask, force);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
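
Usage note (not part of the patch): the comment in irq_try_set_affinity() says the deferral exists to avoid returning an error to user space. The sketch below is a hypothetical user-space check, assuming IRQ 42 exists and /proc/irq/42/smp_affinity is writable as root; the IRQ number and mask are placeholders. On an architecture with CONFIG_GENERIC_PENDING_IRQ, a transiently busy vector allocator should no longer surface here as a write() failing with EBUSY, because the move is queued as pending instead.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical test: move IRQ 42 to CPU 1 via procfs. The IRQ number and
 * the mask value are placeholders; run as root. Before this change a busy
 * vector allocator could make the write() fail with EBUSY.
 */
int main(void)
{
	const char *mask = "2\n";	/* CPU bitmask: CPU 1 only */
	int fd = open("/proc/irq/42/smp_affinity", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, mask, strlen(mask)) < 0)
		fprintf(stderr, "write: %s\n", strerror(errno));
	close(fd);
	return 0;
}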