matrix.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443
  1. /*
  2. * Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>
  3. *
  4. * SPDX-License-Identifier: GPL-2.0
  5. */
  6. #include <linux/spinlock.h>
  7. #include <linux/seq_file.h>
  8. #include <linux/bitmap.h>
  9. #include <linux/percpu.h>
  10. #include <linux/cpu.h>
  11. #include <linux/irq.h>
  12. #define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long))
  13. struct cpumap {
  14. unsigned int available;
  15. unsigned int allocated;
  16. unsigned int managed;
  17. bool online;
  18. unsigned long alloc_map[IRQ_MATRIX_SIZE];
  19. unsigned long managed_map[IRQ_MATRIX_SIZE];
  20. };
/* Global state of the matrix allocator */
struct irq_matrix {
	unsigned int		matrix_bits;	/* Valid bits per map, <= IRQ_MATRIX_BITS */
	unsigned int		alloc_start;	/* First bit of the allocatable area */
	unsigned int		alloc_end;	/* First invalid bit after the area */
	unsigned int		alloc_size;	/* alloc_end - alloc_start */
	unsigned int		global_available;	/* Sum of available bits of online maps */
	unsigned int		global_reserved;	/* Bookkeeping-only reservation count */
	unsigned int		systembits_inalloc;	/* System bits inside the allocatable area */
	unsigned int		total_allocated;	/* Allocated bits across all CPUs */
	unsigned int		online_maps;	/* Number of online per-CPU maps */
	struct cpumap __percpu	*maps;		/* Per-CPU allocation maps */
	unsigned long		scratch_map[IRQ_MATRIX_SIZE];	/* Scratch space for searches */
	unsigned long		system_map[IRQ_MATRIX_SIZE];	/* System-wide reserved bits */
};
  35. #define CREATE_TRACE_POINTS
  36. #include <trace/events/irq_matrix.h>
  37. /**
  38. * irq_alloc_matrix - Allocate a irq_matrix structure and initialize it
  39. * @matrix_bits: Number of matrix bits must be <= IRQ_MATRIX_BITS
  40. * @alloc_start: From which bit the allocation search starts
  41. * @alloc_end: At which bit the allocation search ends, i.e first
  42. * invalid bit
  43. */
  44. __init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
  45. unsigned int alloc_start,
  46. unsigned int alloc_end)
  47. {
  48. struct irq_matrix *m;
  49. if (matrix_bits > IRQ_MATRIX_BITS)
  50. return NULL;
  51. m = kzalloc(sizeof(*m), GFP_KERNEL);
  52. if (!m)
  53. return NULL;
  54. m->matrix_bits = matrix_bits;
  55. m->alloc_start = alloc_start;
  56. m->alloc_end = alloc_end;
  57. m->alloc_size = alloc_end - alloc_start;
  58. m->maps = alloc_percpu(*m->maps);
  59. if (!m->maps) {
  60. kfree(m);
  61. return NULL;
  62. }
  63. return m;
  64. }
/**
 * irq_matrix_online - Bring the local CPU matrix online
 * @m:		Matrix pointer
 */
void irq_matrix_online(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* Onlining an already online map is a hotplug accounting bug */
	BUG_ON(cm->online);

	/*
	 * Start from a clean allocation map. The managed map and the
	 * managed count are deliberately preserved; only regular
	 * allocations are dropped.
	 */
	bitmap_zero(cm->alloc_map, m->matrix_bits);
	/* Managed and in-area system bits are not regularly allocatable */
	cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc);
	cm->allocated = 0;
	m->global_available += cm->available;
	cm->online = true;
	m->online_maps++;
	trace_irq_matrix_online(m);
}
/**
 * irq_matrix_offline - Bring the local CPU matrix offline
 * @m:		Matrix pointer
 */
void irq_matrix_offline(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* Update the global available size */
	m->global_available -= cm->available;
	cm->online = false;
	m->online_maps--;
	trace_irq_matrix_offline(m);
}
/*
 * matrix_alloc_area - Find and claim a free area in a CPU map
 * @m:		Matrix pointer
 * @cm:		CPU map to search
 * @num:	Number of consecutive free bits required
 * @managed:	If true, claim the area in the managed map, otherwise in
 *		the regular allocation map
 *
 * Returns the first bit of the claimed area, or a value >= m->alloc_end
 * when no suitable area exists.
 */
static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
				      unsigned int num, bool managed)
{
	unsigned int area, start = m->alloc_start;
	unsigned int end = m->alloc_end;

	/* A bit is busy if it is managed, system reserved or allocated */
	bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
	bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
	area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
	if (area >= end)
		return area;
	if (managed)
		bitmap_set(cm->managed_map, area, num);
	else
		bitmap_set(cm->alloc_map, area, num);
	return area;
}
/**
 * irq_matrix_assign_system - Assign system wide entry in the matrix
 * @m:		Matrix pointer
 * @bit:	Which bit to reserve
 * @replace:	Replace an already allocated vector with a system
 *		vector at the same bit position.
 *
 * The BUG_ON()s below are on purpose. If this goes wrong in the
 * early boot process, then the chance to survive is about zero.
 * If this happens when the system is live, it's not much better.
 */
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
			      bool replace)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	BUG_ON(bit > m->matrix_bits);
	/* Only valid before secondary CPUs are up, unless replacing */
	BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));

	set_bit(bit, m->system_map);
	if (replace) {
		/* The replaced bit must be allocated on this CPU */
		BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
		cm->allocated--;
		m->total_allocated--;
	}
	/* System bits inside the allocation area shrink the usable space */
	if (bit >= m->alloc_start && bit < m->alloc_end)
		m->systembits_inalloc++;
	trace_irq_matrix_assign_system(bit, m);
}
/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be reserved.
 *
 * Can be called for offline CPUs. Note, this will only reserve one bit
 * on all CPUs in @msk, but it's not guaranteed that the bits are at the
 * same offset on all CPUs
 *
 * Returns 0 on success, -ENOSPC when any CPU in @msk has no free bit,
 * in which case all reservations made by this call are rolled back.
 */
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu, failed_cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		bit = matrix_alloc_area(m, cm, 1, true);
		if (bit >= m->alloc_end)
			goto cleanup;
		cm->managed++;
		/* Offline maps account their managed bits at online time */
		if (cm->online) {
			cm->available--;
			m->global_available--;
		}
		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
	}
	return 0;
cleanup:
	/* Undo the reservations on all CPUs handled before the failure */
	failed_cpu = cpu;
	for_each_cpu(cpu, msk) {
		if (cpu == failed_cpu)
			break;
		irq_matrix_remove_managed(m, cpumask_of(cpu));
	}
	return -ENOSPC;
}
/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs
 *
 * This removes not allocated managed interrupts from the map. It does
 * not matter which one because the managed interrupts free their
 * allocation when they shut down. If not, the accounting is screwed,
 * but all what can be done at this point is warn about it.
 */
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit, end = m->alloc_end;

		/* Removing more managed bits than were reserved is a bug */
		if (WARN_ON_ONCE(!cm->managed))
			continue;

		/* Get managed bit which are not allocated */
		bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

		bit = find_first_bit(m->scratch_map, end);
		/* All reserved bits in use: accounting is broken, just warn */
		if (WARN_ON_ONCE(bit >= end))
			continue;

		clear_bit(bit, cm->managed_map);
		cm->managed--;
		/* Offline maps are (re)accounted at online time */
		if (cm->online) {
			cm->available++;
			m->global_available++;
		}
		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
	}
}
/**
 * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @cpu:	On which CPU the interrupt should be allocated
 *
 * Returns the allocated bit number, or -ENOSPC when all managed bits
 * reserved on @cpu are already in use.
 */
int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
{
	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
	unsigned int bit, end = m->alloc_end;

	/* Get managed bit which are not allocated */
	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
	bit = find_first_bit(m->scratch_map, end);
	if (bit >= end)
		return -ENOSPC;
	set_bit(bit, cm->alloc_map);
	cm->allocated++;
	m->total_allocated++;
	/*
	 * available/global_available are not adjusted here: managed bits
	 * were already subtracted when they were reserved.
	 */
	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
	return bit;
}
/**
 * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
 * @m:		Matrix pointer
 * @bit:	Which bit to mark
 *
 * This should only be used to mark preallocated vectors
 */
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* Bits outside the allocatable area cannot be assigned */
	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;
	/* Refuse to assign a bit which is already allocated */
	if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
		return;
	cm->allocated++;
	m->total_allocated++;
	cm->available--;
	m->global_available--;
	trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
}
/**
 * irq_matrix_reserve - Reserve interrupts
 * @m:		Matrix pointer
 *
 * This is merely a book keeping call. It increments the number of globally
 * reserved interrupt bits w/o actually allocating them. This allows to
 * setup interrupt descriptors w/o assigning low level resources to it.
 * The actual allocation happens when the interrupt gets activated.
 */
void irq_matrix_reserve(struct irq_matrix *m)
{
	/*
	 * Both conditions hold only when reserved == available, i.e. the
	 * warning fires exactly when the new reservation steps past the
	 * number of available bits, not on every further reservation.
	 */
	if (m->global_reserved <= m->global_available &&
	    m->global_reserved + 1 > m->global_available)
		pr_warn("Interrupt reservation exceeds available resources\n");
	m->global_reserved++;
	trace_irq_matrix_reserve(m);
}
/**
 * irq_matrix_remove_reserved - Remove interrupt reservation
 * @m:		Matrix pointer
 *
 * This is merely a book keeping call. It decrements the number of globally
 * reserved interrupt bits. This is used to undo irq_matrix_reserve() when the
 * interrupt was never in use and a real vector allocated, which undid the
 * reservation.
 */
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
	m->global_reserved--;
	trace_irq_matrix_remove_reserved(m);
}
  277. /**
  278. * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
  279. * @m: Matrix pointer
  280. * @msk: Which CPUs to search in
  281. * @reserved: Allocate previously reserved interrupts
  282. * @mapped_cpu: Pointer to store the CPU for which the irq was allocated
  283. */
  284. int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
  285. bool reserved, unsigned int *mapped_cpu)
  286. {
  287. unsigned int cpu;
  288. for_each_cpu(cpu, msk) {
  289. struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
  290. unsigned int bit;
  291. if (!cm->online)
  292. continue;
  293. bit = matrix_alloc_area(m, cm, 1, false);
  294. if (bit < m->alloc_end) {
  295. cm->allocated++;
  296. cm->available--;
  297. m->total_allocated++;
  298. m->global_available--;
  299. if (reserved)
  300. m->global_reserved--;
  301. *mapped_cpu = cpu;
  302. trace_irq_matrix_alloc(bit, cpu, m, cm);
  303. return bit;
  304. }
  305. }
  306. return -ENOSPC;
  307. }
/**
 * irq_matrix_free - Free allocated interrupt in the matrix
 * @m:		Matrix pointer
 * @cpu:	Which CPU map needs be updated
 * @bit:	The bit to remove
 * @managed:	If true, the interrupt is managed and not accounted
 *		as available.
 */
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
		     unsigned int bit, bool managed)
{
	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;

	/*
	 * Offline maps are skipped: irq_matrix_online() zeroes alloc_map
	 * and resets the counters, so there is nothing to undo here for
	 * an offline CPU.
	 */
	if (cm->online) {
		clear_bit(bit, cm->alloc_map);
		cm->allocated--;
		m->total_allocated--;
		/* Managed bits are accounted at reserve/remove time */
		if (!managed) {
			cm->available++;
			m->global_available++;
		}
	}
	trace_irq_matrix_free(bit, cpu, m, cm);
}
  333. /**
  334. * irq_matrix_available - Get the number of globally available irqs
  335. * @m: Pointer to the matrix to query
  336. * @cpudown: If true, the local CPU is about to go down, adjust
  337. * the number of available irqs accordingly
  338. */
  339. unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
  340. {
  341. struct cpumap *cm = this_cpu_ptr(m->maps);
  342. return m->global_available - cpudown ? cm->available : 0;
  343. }
/**
 * irq_matrix_reserved - Get the number of globally reserved irqs
 * @m:		Pointer to the matrix to query
 */
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
	return m->global_reserved;
}
/**
 * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
 * @m:		Pointer to the matrix to search
 *
 * This returns number of allocated irqs
 */
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	return cm->allocated;
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf:		Pointer to the seq_file to print to
 * @m:		Pointer to the matrix allocator
 * @ind:	Indentation for the print format
 *
 * Note, this is a lockless snapshot.
 */
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
	unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
	int cpu;

	seq_printf(sf, "Online bitmaps:   %6u\n", m->online_maps);
	seq_printf(sf, "Global available: %6u\n", m->global_available);
	seq_printf(sf, "Global reserved:  %6u\n", m->global_reserved);
	seq_printf(sf, "Total allocated:  %6u\n", m->total_allocated);
	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
		   m->system_map);
	seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
	/* Hold the hotplug lock so the online CPU set is stable */
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

		seq_printf(sf, "%*s %4d  %4u  %4u  %4u  %*pbl\n", ind, " ",
			   cpu, cm->available, cm->managed, cm->allocated,
			   m->matrix_bits, cm->alloc_map);
	}
	cpus_read_unlock();
}
#endif