@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/bitmap.h>
 #include <linux/irqdomain.h>
+#include <linux/sysfs.h>
 
 #include "internals.h"
 
@@ -123,6 +124,181 @@ static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
+static void irq_kobj_release(struct kobject *kobj);
+
+#ifdef CONFIG_SYSFS
+static struct kobject *irq_kobj_base;
+
+#define IRQ_ATTR_RO(_name) \
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+static ssize_t per_cpu_count_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	int cpu, irq = desc->irq_data.irq;
+	ssize_t ret = 0;
+	char *p = "";
+
+	for_each_possible_cpu(cpu) {
+		unsigned int c = kstat_irqs_cpu(irq, cpu);
+
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
+		p = ",";
+	}
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	return ret;
+}
+IRQ_ATTR_RO(per_cpu_count);
+
+static ssize_t chip_name_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	if (desc->irq_data.chip && desc->irq_data.chip->name) {
+		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
+				desc->irq_data.chip->name);
+	}
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+}
+IRQ_ATTR_RO(chip_name);
+
+static ssize_t hwirq_show(struct kobject *kobj,
+			  struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	if (desc->irq_data.domain)
+		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+}
+IRQ_ATTR_RO(hwirq);
+
+static ssize_t type_show(struct kobject *kobj,
+			 struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	ret = sprintf(buf, "%s\n",
+		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+
+}
+IRQ_ATTR_RO(type);
+
+static ssize_t name_show(struct kobject *kobj,
+			 struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	if (desc->name)
+		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+}
+IRQ_ATTR_RO(name);
+
+static ssize_t actions_show(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	struct irqaction *action;
+	ssize_t ret = 0;
+	char *p = "";
+
+	raw_spin_lock_irq(&desc->lock);
+	for (action = desc->action; action != NULL; action = action->next) {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
+				 p, action->name);
+		p = ",";
+	}
+	raw_spin_unlock_irq(&desc->lock);
+
+	if (ret)
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+	return ret;
+}
+IRQ_ATTR_RO(actions);
+
+static struct attribute *irq_attrs[] = {
+	&per_cpu_count_attr.attr,
+	&chip_name_attr.attr,
+	&hwirq_attr.attr,
+	&type_attr.attr,
+	&name_attr.attr,
+	&actions_attr.attr,
+	NULL
+};
+
+static struct kobj_type irq_kobj_type = {
+	.release	= irq_kobj_release,
+	.sysfs_ops	= &kobj_sysfs_ops,
+	.default_attrs	= irq_attrs,
+};
+
+static void irq_sysfs_add(int irq, struct irq_desc *desc)
+{
+	if (irq_kobj_base) {
+		/*
+		 * Continue even in case of failure as this is nothing
+		 * crucial.
+		 */
+		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
+			pr_warn("Failed to add kobject for irq %d\n", irq);
+	}
+}
+
+static int __init irq_sysfs_init(void)
+{
+	struct irq_desc *desc;
+	int irq;
+
+	/* Prevent concurrent irq alloc/free */
+	irq_lock_sparse();
+
+	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
+	if (!irq_kobj_base) {
+		irq_unlock_sparse();
+		return -ENOMEM;
+	}
+
+	/* Add the already allocated interrupts */
+	for_each_irq_desc(irq, desc)
+		irq_sysfs_add(irq, desc);
+	irq_unlock_sparse();
+
+	return 0;
+}
+postcore_initcall(irq_sysfs_init);
+
+#else /* !CONFIG_SYSFS */
+
+static struct kobj_type irq_kobj_type = {
+	.release	= irq_kobj_release,
+};
+
+static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+
+#endif /* CONFIG_SYSFS */
+
 static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
 
 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
@@ -187,6 +363,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 
 	desc_set_defaults(irq, desc, node, affinity, owner);
 	irqd_set(&desc->irq_data, flags);
+	kobject_init(&desc->kobj, &irq_kobj_type);
 
 	return desc;
 
@@ -197,15 +374,22 @@ err_desc:
 	return NULL;
 }
 
-static void delayed_free_desc(struct rcu_head *rhp)
+static void irq_kobj_release(struct kobject *kobj)
 {
-	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
 
 	free_masks(desc);
 	free_percpu(desc->kstat_irqs);
 	kfree(desc);
 }
 
+static void delayed_free_desc(struct rcu_head *rhp)
+{
+	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
+
+	kobject_put(&desc->kobj);
+}
+
 static void free_desc(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -217,8 +401,12 @@ static void free_desc(unsigned int irq)
 	 * kstat_irq_usr(). Once we deleted the descriptor from the
 	 * sparse tree we can free it. Access in proc will fail to
 	 * lookup the descriptor.
+	 *
+	 * The sysfs entry must be serialized against a concurrent
+	 * irq_sysfs_init() as well.
 	 */
 	mutex_lock(&sparse_irq_lock);
+	kobject_del(&desc->kobj);
 	delete_irq_desc(irq);
 	mutex_unlock(&sparse_irq_lock);
 
@@ -261,6 +449,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
 			goto err;
 		mutex_lock(&sparse_irq_lock);
 		irq_insert_desc(start + i, desc);
+		irq_sysfs_add(start + i, desc);
 		mutex_unlock(&sparse_irq_lock);
 	}
 	return start;
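
With this patch applied and CONFIG_SYSFS enabled, every allocated interrupt
gets a directory under /sys/kernel/irq/ named after its Linux irq number
(kobject_create_and_add("irq", kernel_kobj) plus kobject_add(..., "%d", irq)),
populated with the read-only attributes defined above. A sketch of a resulting
session follows; the irq number and the count values are invented for the
example, and hwirq reads back empty unless the descriptor has an irq domain:

    $ ls /sys/kernel/irq/18
    actions  chip_name  hwirq  name  per_cpu_count  type
    $ cat /sys/kernel/irq/18/per_cpu_count
    5,0,0,0
    $ cat /sys/kernel/irq/18/type
    level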