proc.c

/*
 * linux/kernel/irq/proc.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP
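
/*
 * Show the affinity mask of an interrupt: bitmap format for
 * smp_affinity (type == 0), CPU list format for smp_affinity_list
 * (type == 1). If a set-affinity request is still pending, the
 * pending mask is reported instead of the current one.
 */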
static int show_irq_affinity(int type, struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask = desc->irq_data.affinity;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (irqd_is_setaffinity_pending(&desc->irq_data))
		mask = desc->pending_mask;
#endif
	if (type)
		seq_cpumask_list(m, mask);
	else
		seq_cpumask(m, mask);
	seq_putc(m, '\n');
	return 0;
}
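
/*
 * /proc/irq/<irq>/affinity_hint: copy the driver-provided affinity
 * hint into a temporary cpumask under the descriptor lock and print
 * it. An empty mask is printed when no hint has been set.
 */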
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_cpumask(m, mask);
	seq_putc(m, '\n');
	free_cpumask_var(mask);

	return 0;
}

#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(0, m, v);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(1, m, v);
}
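
/*
 * Write handler shared by smp_affinity (bitmap, type == 0) and
 * smp_affinity_list (CPU list, type == 1). The user-supplied mask is
 * parsed into a temporary cpumask and applied with irq_set_affinity().
 * A mask containing no online CPU is only accepted as the special
 * "let the architecture pick a default" request handled by
 * irq_select_affinity_usr().
 */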
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity(irq) || no_irq_affinity)
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto free_cpumask;
	}

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/* Special case for empty set - allow the architecture
		   code to set default SMP affinity. */
		err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
	} else {
		irq_set_affinity(irq, new_value);
		err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

static const struct file_operations irq_affinity_hint_proc_fops = {
	.open		= irq_affinity_hint_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations irq_affinity_list_proc_fops = {
	.open		= irq_affinity_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_list_proc_write,
};

static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, irq_default_affinity);
	seq_putc(m, '\n');
	return 0;
}
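
/*
 * /proc/irq/default_smp_affinity: update the system-wide default
 * affinity mask (irq_default_affinity) used when new interrupts are
 * set up. The new mask must contain at least one online CPU.
 */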
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}

static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};

static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", desc->irq_data.node);
	return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_node_proc_fops = {
	.open		= irq_node_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_spurious_proc_fops = {
	.open		= irq_spurious_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#define MAX_NAMELEN 128
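
/*
 * Return 1 if no other action registered on this interrupt uses the
 * same name as @new_action, 0 otherwise. The action list is walked
 * under the descriptor lock.
 */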
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action; action; action = action->next) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
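
/*
 * Create the per-handler directory /proc/irq/<irq>/<action-name>/,
 * but only if the per-interrupt directory exists, the action is
 * named and the name is not already taken on this interrupt.
 */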
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
			!name_unique(irq, action))
		return;

	memset(name, 0, MAX_NAMELEN);
	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10
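
/*
 * Create /proc/irq/<irq>/ and populate it: smp_affinity,
 * affinity_hint, smp_affinity_list and node on SMP builds, plus the
 * spurious statistics file.
 */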
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		return;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0644, desc->dir,
			 &irq_affinity_proc_fops, (void *)(long)irq);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_data("affinity_hint", 0444, desc->dir,
			 &irq_affinity_hint_proc_fops, (void *)(long)irq);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", 0644, desc->dir,
			 &irq_affinity_list_proc_fops, (void *)(long)irq);

	proc_create_data("node", 0444, desc->dir,
			 &irq_node_proc_fops, (void *)(long)irq);
#endif

	proc_create_data("spurious", 0444, desc->dir,
			 &irq_spurious_proc_fops, (void *)(long)irq);
}
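
/*
 * Tear down the per-interrupt entries and then remove the
 * /proc/irq/<irq>/ directory itself.
 */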
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
#endif
	remove_proc_entry("spurious", desc->dir);

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_fops);
#endif
}
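
/*
 * Create the /proc/irq/ root directory, the default_smp_affinity
 * file and an entry for every interrupt descriptor that already
 * exists.
 */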
void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc) {
		if (!desc)
			continue;

		register_irq_proc(irq, desc);
	}
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
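
/*
 * seq_file show callback for /proc/interrupts. The first row also
 * prints the per-CPU column header; each interrupt row shows the
 * per-CPU counts, chip name, hardware irq number, trigger type and
 * action names, and interrupts with no action and no counts are
 * suppressed. The final row lets the architecture append its own
 * counters via arch_show_interrupts().
 */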
int show_interrupts(struct seq_file *p, void *v)
{
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	irq_lock_sparse();
	desc = irq_to_desc(i);
	if (!desc)
		goto outsparse;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	irq_unlock_sparse();
	return 0;
}
#endif