@@ -455,6 +455,31 @@ void irq_set_default_host(struct irq_domain *domain)
 }
 EXPORT_SYMBOL_GPL(irq_set_default_host);
 
+static void irq_domain_clear_mapping(struct irq_domain *domain,
+				     irq_hw_number_t hwirq)
+{
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = 0;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_delete(&domain->revmap_tree, hwirq);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
+
+static void irq_domain_set_mapping(struct irq_domain *domain,
+				   irq_hw_number_t hwirq,
+				   struct irq_data *irq_data)
+{
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = irq_data->irq;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
+
 void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(irq);
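These two helpers factor out the reverse-map update that the hunks below
currently open-code in four places. For context, a minimal sketch of the
lookup side they keep consistent (modelled on irq_find_mapping(); simplified,
RCU/locking elided, not the kernel's verbatim code):

	static unsigned int revmap_lookup_sketch(struct irq_domain *domain,
						 irq_hw_number_t hwirq)
	{
		struct irq_data *data;

		/* Small hwirqs resolve through the flat linear array... */
		if (hwirq < domain->revmap_size)
			return domain->linear_revmap[hwirq];

		/* ...everything else goes through the radix tree. */
		data = radix_tree_lookup(&domain->revmap_tree, hwirq);
		return data ? data->irq : 0;
	}

Note the asymmetry the helpers preserve: the linear array stores virq
numbers, while the radix tree stores struct irq_data pointers.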
@@ -483,13 +508,7 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 	domain->mapcount--;
 
 	/* Clear reverse map for this hwirq */
-	if (hwirq < domain->revmap_size) {
-		domain->linear_revmap[hwirq] = 0;
-	} else {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_delete(&domain->revmap_tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
-	}
+	irq_domain_clear_mapping(domain, hwirq);
 }
 
 int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
@@ -533,13 +552,7 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
 	}
 
 	domain->mapcount++;
-	if (hwirq < domain->revmap_size) {
-		domain->linear_revmap[hwirq] = virq;
-	} else {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-		mutex_unlock(&revmap_trees_mutex);
-	}
+	irq_domain_set_mapping(domain, hwirq, irq_data);
 	mutex_unlock(&irq_domain_mutex);
 
 	irq_clear_status_flags(virq, IRQ_NOREQUEST);
@@ -1138,16 +1151,9 @@ static void irq_domain_insert_irq(int virq)
 
 	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
 		struct irq_domain *domain = data->domain;
-		irq_hw_number_t hwirq = data->hwirq;
 
 		domain->mapcount++;
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = virq;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_insert(&domain->revmap_tree, hwirq, data);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		irq_domain_set_mapping(domain, data->hwirq, data);
 
 		/* If not already assigned, give the domain the chip's name */
 		if (!domain->name && data->chip)
@@ -1171,13 +1177,7 @@ static void irq_domain_remove_irq(int virq)
 		irq_hw_number_t hwirq = data->hwirq;
 
 		domain->mapcount--;
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = 0;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_delete(&domain->revmap_tree, hwirq);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		irq_domain_clear_mapping(domain, hwirq);
 	}
 }
 
@@ -1362,7 +1362,8 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
 					   unsigned int irq_base,
 					   unsigned int nr_irqs)
 {
-	domain->ops->free(domain, irq_base, nr_irqs);
+	if (domain->ops->free)
+		domain->ops->free(domain, irq_base, nr_irqs);
}
 
 int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
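The new NULL check matters for irq_domain_pop_irq() below, which tears down
a pushed domain one virq at a time via irq_domain_free_irqs_hierarchy(), and
nothing requires such a domain to provide a .free callback. A hypothetical
ops table for a pushed domain (names invented for illustration, not from
this patch) could therefore look like:

	static const struct irq_domain_ops foo_stacked_domain_ops = {
		/* Sets hwirq, chip and chip_data for the pushed level. */
		.alloc	= foo_stacked_domain_alloc,
		/* No .free: irq_domain_free_irqs_hierarchy() now tolerates that. */
	};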
@@ -1448,6 +1449,175 @@ out_free_desc:
 	return ret;
 }
 
+/* The irq_data was moved, fix the revmap to refer to the new location */
+static void irq_domain_fix_revmap(struct irq_data *d)
+{
+	void **slot;
+
+	if (d->hwirq < d->domain->revmap_size)
+		return; /* Not using radix tree. */
+
+	/* Fix up the revmap. */
+	mutex_lock(&revmap_trees_mutex);
+	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
+	if (slot)
+		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
+	mutex_unlock(&revmap_trees_mutex);
+}
+
+/**
+ * irq_domain_push_irq() - Push a domain into the top of a hierarchy.
+ * @domain:	Domain to push.
+ * @virq:	Irq to push the domain into.
+ * @arg:	Passed to the irq_domain_ops alloc() function.
+ *
+ * For an already existing irqdomain hierarchy, as might be obtained
+ * via a call to pci_enable_msix(), add an additional domain to the
+ * head of the processing chain.  Must be called before request_irq()
+ * has been called.
+ */
+int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
+{
+	struct irq_data *child_irq_data;
+	struct irq_data *root_irq_data = irq_get_irq_data(virq);
+	struct irq_desc *desc;
+	int rv = 0;
+
+	/*
+	 * Check that no action has been set, which indicates the virq
+	 * is in a state where this function doesn't have to deal with
+	 * races between interrupt handling and maintaining the
+	 * hierarchy.  This will catch gross misuse.  Attempting to
+	 * make the check race free would require holding locks across
+	 * calls to struct irq_domain_ops->alloc(), which could lead
+	 * to deadlock, so we just do a simple check before starting.
+	 */
+	desc = irq_to_desc(virq);
+	if (!desc)
+		return -EINVAL;
+	if (WARN_ON(desc->action))
+		return -EBUSY;
+
+	if (domain == NULL)
+		return -EINVAL;
+
+	if (WARN_ON(!irq_domain_is_hierarchy(domain)))
+		return -EINVAL;
+
+	if (!root_irq_data)
+		return -EINVAL;
+
+	if (domain->parent != root_irq_data->domain)
+		return -EINVAL;
+
+	child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
+				      irq_data_get_node(root_irq_data));
+	if (!child_irq_data)
+		return -ENOMEM;
+
+	mutex_lock(&irq_domain_mutex);
+
+	/* Copy the original irq_data. */
+	*child_irq_data = *root_irq_data;
+
+	/*
+	 * Overwrite the root_irq_data, which is embedded in struct
+	 * irq_desc, with values for this domain.
+	 */
+	root_irq_data->parent_data = child_irq_data;
+	root_irq_data->domain = domain;
+	root_irq_data->mask = 0;
+	root_irq_data->hwirq = 0;
+	root_irq_data->chip = NULL;
+	root_irq_data->chip_data = NULL;
+
+	/* May (probably does) set hwirq, chip, etc. */
+	rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
+	if (rv) {
+		/* Restore the original irq_data. */
+		*root_irq_data = *child_irq_data;
+		goto error;
+	}
+
+	irq_domain_fix_revmap(child_irq_data);
+	irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
+
+error:
+	mutex_unlock(&irq_domain_mutex);
+
+	return rv;
+}
+EXPORT_SYMBOL_GPL(irq_domain_push_irq);
+
+/**
+ * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
+ * @domain:	Domain to remove.
+ * @virq:	Irq to remove the domain from.
+ *
+ * Undo the effects of a call to irq_domain_push_irq().  Must be
+ * called either before request_irq() or after free_irq().
+ */
+int irq_domain_pop_irq(struct irq_domain *domain, int virq)
+{
+	struct irq_data *root_irq_data = irq_get_irq_data(virq);
+	struct irq_data *child_irq_data;
+	struct irq_data *tmp_irq_data;
+	struct irq_desc *desc;
+
+	/*
+	 * Check that no action is set, which indicates the virq is in
+	 * a state where this function doesn't have to deal with races
+	 * between interrupt handling and maintaining the hierarchy.
+	 * This will catch gross misuse.  Attempting to make the check
+	 * race free would require holding locks across calls to
+	 * struct irq_domain_ops->free(), which could lead to
+	 * deadlock, so we just do a simple check before starting.
+	 */
+	desc = irq_to_desc(virq);
+	if (!desc)
+		return -EINVAL;
+	if (WARN_ON(desc->action))
+		return -EBUSY;
+
+	if (domain == NULL)
+		return -EINVAL;
+
+	if (!root_irq_data)
+		return -EINVAL;
+
+	tmp_irq_data = irq_domain_get_irq_data(domain, virq);
+
+	/* We can only "pop" if this domain is at the top of the list */
+	if (WARN_ON(root_irq_data != tmp_irq_data))
+		return -EINVAL;
+
+	if (WARN_ON(root_irq_data->domain != domain))
+		return -EINVAL;
+
+	child_irq_data = root_irq_data->parent_data;
+	if (WARN_ON(!child_irq_data))
+		return -EINVAL;
+
+	mutex_lock(&irq_domain_mutex);
+
+	root_irq_data->parent_data = NULL;
+
+	irq_domain_clear_mapping(domain, root_irq_data->hwirq);
+	irq_domain_free_irqs_hierarchy(domain, virq, 1);
+
+	/* Restore the original irq_data. */
+	*root_irq_data = *child_irq_data;
+
+	irq_domain_fix_revmap(root_irq_data);
+
+	mutex_unlock(&irq_domain_mutex);
+
+	kfree(child_irq_data);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
+
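A hedged usage sketch of the new pair (driver, domain and handler names are
invented for illustration; they are not part of this patch). A driver that
owns an MSI-X vector and wants an extra translation/masking layer pushes its
domain before requesting the IRQ; teardown mirrors setup with free_irq()
followed by irq_domain_pop_irq():

	static int my_dev_setup_irq(struct my_dev *md, int virq)
	{
		int rv;

		/* pci_enable_msix() has already built the hierarchy for virq. */
		rv = irq_domain_push_irq(my_stacked_domain, virq, &md->alloc_info);
		if (rv)
			return rv;

		rv = request_irq(virq, my_handler, 0, "my-dev", md);
		if (rv)
			irq_domain_pop_irq(my_stacked_domain, virq);

		return rv;
	}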
 /**
  * irq_domain_free_irqs - Free IRQ number and associated data structures
  * @virq: base IRQ number