/* kernel/irq/proc.c — scraped page header and line-number residue removed */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
  4. *
  5. * This file contains the /proc/irq/ handling code.
  6. */
  7. #include <linux/irq.h>
  8. #include <linux/gfp.h>
  9. #include <linux/proc_fs.h>
  10. #include <linux/seq_file.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/kernel_stat.h>
  13. #include <linux/mutex.h>
  14. #include "internals.h"
  15. /*
  16. * Access rules:
  17. *
  18. * procfs protects read/write of /proc/irq/N/ files against a
  19. * concurrent free of the interrupt descriptor. remove_proc_entry()
  20. * immediately prevents new read/writes to happen and waits for
  21. * already running read/write functions to complete.
  22. *
  23. * We remove the proc entries first and then delete the interrupt
  24. * descriptor from the radix tree and free it. So it is guaranteed
  25. * that irq_to_desc(N) is valid as long as the read/writes are
  26. * permitted by procfs.
  27. *
  28. * The read from /proc/interrupts is a different problem because there
  29. * is no protection. So the lookup and the access to irqdesc
  30. * information must be protected by sparse_irq_lock.
  31. */
/* The /proc/irq directory; created once in init_irq_proc() */
static struct proc_dir_entry *root_irq_dir;
  33. #ifdef CONFIG_SMP
/*
 * Selector for show_irq_affinity(): which mask to print and in which
 * format. The *_LIST variants use the comma-separated CPU-list format,
 * the others a hex bitmask.
 */
enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};
/*
 * Print one of an interrupt's affinity masks to a seq_file.
 *
 * @type: AFFINITY/AFFINITY_LIST select the configured affinity mask;
 *        EFFECTIVE/EFFECTIVE_LIST select the effective mask, which is
 *        only available with CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK.
 * @m:    seq_file whose ->private holds the irq number.
 *
 * Returns 0 on success, -EINVAL for an unsupported @type.
 */
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		/* A not-yet-applied affinity change supersedes the current mask */
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
		/* Without effective-mask support these types fall to -EINVAL */
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}
  74. static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
  75. {
  76. struct irq_desc *desc = irq_to_desc((long)m->private);
  77. unsigned long flags;
  78. cpumask_var_t mask;
  79. if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
  80. return -ENOMEM;
  81. raw_spin_lock_irqsave(&desc->lock, flags);
  82. if (desc->affinity_hint)
  83. cpumask_copy(mask, desc->affinity_hint);
  84. raw_spin_unlock_irqrestore(&desc->lock, flags);
  85. seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
  86. free_cpumask_var(mask);
  87. return 0;
  88. }
/* Arch override hook to validate a user supplied affinity mask */
#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

/* When non-zero, user writes to irq affinity files are rejected (-EIO) */
int no_irq_affinity;
/* Show /proc/irq/<irq>/smp_affinity (hex bitmask format) */
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

/* Show /proc/irq/<irq>/smp_affinity_list (CPU-list format) */
static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}
/*
 * Common write handler for /proc/irq/<irq>/smp_affinity[_list].
 *
 * @type:   0 parses @buffer as a hex bitmask, non-zero as a CPU list.
 * @buffer: user supplied affinity specification.
 *
 * Returns the number of bytes consumed on success, a negative errno
 * otherwise.
 */
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
	cpumask_var_t new_value;
	int err;

	/* Reject writes when affinity is fixed or globally disabled */
	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto free_cpumask;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}
/* Write handler for smp_affinity (bitmask format) */
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

/* Write handler for smp_affinity_list (CPU-list format) */
static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}
/* Open callbacks: stash the irq number (PDE data) as seq_file private */
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}
/* file_operations for /proc/irq/<irq>/smp_affinity */
static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

/* file_operations for /proc/irq/<irq>/smp_affinity_list */
static const struct file_operations irq_affinity_list_proc_fops = {
	.open		= irq_affinity_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_list_proc_write,
};
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
/* Show /proc/irq/<irq>/effective_affinity (hex bitmask format) */
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

/* Show /proc/irq/<irq>/effective_affinity_list (CPU-list format) */
static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif
/* Show /proc/irq/default_smp_affinity as a hex bitmask */
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}
/*
 * Write handler for /proc/irq/default_smp_affinity.
 *
 * Parses a hex bitmask from user space and installs it as the default
 * affinity used for newly set up interrupts. Returns @count on success
 * or a negative errno.
 */
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}
/* Open callback for /proc/irq/default_smp_affinity */
static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}

/* file_operations for /proc/irq/default_smp_affinity */
static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};
  228. static int irq_node_proc_show(struct seq_file *m, void *v)
  229. {
  230. struct irq_desc *desc = irq_to_desc((long) m->private);
  231. seq_printf(m, "%d\n", irq_desc_get_node(desc));
  232. return 0;
  233. }
  234. #endif
  235. static int irq_spurious_proc_show(struct seq_file *m, void *v)
  236. {
  237. struct irq_desc *desc = irq_to_desc((long) m->private);
  238. seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
  239. desc->irq_count, desc->irqs_unhandled,
  240. jiffies_to_msecs(desc->last_unhandled));
  241. return 0;
  242. }
  243. #define MAX_NAMELEN 128
  244. static int name_unique(unsigned int irq, struct irqaction *new_action)
  245. {
  246. struct irq_desc *desc = irq_to_desc(irq);
  247. struct irqaction *action;
  248. unsigned long flags;
  249. int ret = 1;
  250. raw_spin_lock_irqsave(&desc->lock, flags);
  251. for_each_action_of_desc(desc, action) {
  252. if ((action != new_action) && action->name &&
  253. !strcmp(new_action->name, action->name)) {
  254. ret = 0;
  255. break;
  256. }
  257. }
  258. raw_spin_unlock_irqrestore(&desc->lock, flags);
  259. return ret;
  260. }
/*
 * register_handler_proc - create /proc/irq/<irq>/<name>/ for an action
 *
 * Only created when the irq directory exists, the action has a name,
 * the directory was not created before and the name is unique among
 * the actions of this interrupt.
 */
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
					!name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}
  272. #undef MAX_NAMELEN
  273. #define MAX_NAMELEN 10
  274. void register_irq_proc(unsigned int irq, struct irq_desc *desc)
  275. {
  276. static DEFINE_MUTEX(register_lock);
  277. void __maybe_unused *irqp = (void *)(unsigned long) irq;
  278. char name [MAX_NAMELEN];
  279. if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
  280. return;
  281. /*
  282. * irq directories are registered only when a handler is
  283. * added, not when the descriptor is created, so multiple
  284. * tasks might try to register at the same time.
  285. */
  286. mutex_lock(&register_lock);
  287. if (desc->dir)
  288. goto out_unlock;
  289. sprintf(name, "%d", irq);
  290. /* create /proc/irq/1234 */
  291. desc->dir = proc_mkdir(name, root_irq_dir);
  292. if (!desc->dir)
  293. goto out_unlock;
  294. #ifdef CONFIG_SMP
  295. /* create /proc/irq/<irq>/smp_affinity */
  296. proc_create_data("smp_affinity", 0644, desc->dir,
  297. &irq_affinity_proc_fops, irqp);
  298. /* create /proc/irq/<irq>/affinity_hint */
  299. proc_create_single_data("affinity_hint", 0444, desc->dir,
  300. irq_affinity_hint_proc_show, irqp);
  301. /* create /proc/irq/<irq>/smp_affinity_list */
  302. proc_create_data("smp_affinity_list", 0644, desc->dir,
  303. &irq_affinity_list_proc_fops, irqp);
  304. proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
  305. irqp);
  306. # ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
  307. proc_create_single_data("effective_affinity", 0444, desc->dir,
  308. irq_effective_aff_proc_show, irqp);
  309. proc_create_single_data("effective_affinity_list", 0444, desc->dir,
  310. irq_effective_aff_list_proc_show, irqp);
  311. # endif
  312. #endif
  313. proc_create_single_data("spurious", 0444, desc->dir,
  314. irq_spurious_proc_show, (void *)(long)irq);
  315. out_unlock:
  316. mutex_unlock(&register_lock);
  317. }
/*
 * unregister_irq_proc - remove /proc/irq/<irq>/ and everything in it
 *
 * Must mirror register_irq_proc(): each entry created there is removed
 * here before the per-irq directory itself is deleted.
 */
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN
/* Remove the per-action /proc/irq/<irq>/<name>/ directory, if any */
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}
/* Create /proc/irq/default_smp_affinity (SMP only) */
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_fops);
#endif
}
/*
 * init_irq_proc - set up the /proc/irq hierarchy at boot
 *
 * Creates /proc/irq, the default affinity file, and a directory for
 * every interrupt descriptor that already exists.
 */
void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}
  364. #ifdef CONFIG_GENERIC_IRQ_SHOW
/*
 * Weak default for the architecture hook appending extra lines to
 * /proc/interrupts (NMIs etc.); arches override it as needed.
 */
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}
/* Upper bound of the iteration; arches may override */
#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

/*
 * seq_file show callback for /proc/interrupts.
 *
 * Invoked once per index @v; index 0 additionally prints the CPU
 * header, and the index one past the last irq emits the architecture
 * specific summary via arch_show_interrupts(). Lines for interrupts
 * that never fired and have no action (or are chained) are suppressed.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	/* Width of the irq-number column; computed when the header (i == 0)
	 * is printed and reused for all subsequent lines of this read. */
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	/* Protect the descriptor lookup against a concurrent teardown */
	irq_lock_sparse();
	desc = irq_to_desc(i);
	if (!desc)
		goto outsparse;

	raw_spin_lock_irqsave(&desc->lock, flags);
	/* OR per-CPU counts: non-zero means the irq fired at least once */
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if ((!action || irq_desc_is_chained(desc)) && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	/* Chip column: custom printer, chip name, or a placeholder */
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	/* Hardware irq number when a domain is present */
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	/* Names of all actions sharing this interrupt */
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	irq_unlock_sparse();
	return 0;
}
  436. #endif