/* kernel/irq/matrix.c - interrupt matrix allocator */
  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>
  3. #include <linux/spinlock.h>
  4. #include <linux/seq_file.h>
  5. #include <linux/bitmap.h>
  6. #include <linux/percpu.h>
  7. #include <linux/cpu.h>
  8. #include <linux/irq.h>
  9. #define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long))
/* Per CPU view of the vector space */
struct cpumap {
	unsigned int	available;	/* Bits usable for new allocations */
	unsigned int	allocated;	/* Bits currently allocated on this CPU */
	unsigned int	managed;	/* Bits reserved for managed interrupts */
	bool		initialized;	/* True once 'available' has been set up on first online */
	bool		online;		/* CPU is online, i.e. accounted in the global counters */
	unsigned long	alloc_map[IRQ_MATRIX_SIZE];	/* Bitmap of allocated bits */
	unsigned long	managed_map[IRQ_MATRIX_SIZE];	/* Bitmap of managed-reserved bits */
};
/* Global view of the vector matrix */
struct irq_matrix {
	unsigned int	matrix_bits;		/* Usable bits, must be <= IRQ_MATRIX_BITS */
	unsigned int	alloc_start;		/* First bit of the allocation range */
	unsigned int	alloc_end;		/* First invalid bit after the allocation range */
	unsigned int	alloc_size;		/* alloc_end - alloc_start */
	unsigned int	global_available;	/* Sum of 'available' over all online CPUs */
	unsigned int	global_reserved;	/* Bits reserved via irq_matrix_reserve() */
	unsigned int	systembits_inalloc;	/* System bits inside the allocation range */
	unsigned int	total_allocated;	/* Number of allocated bits */
	unsigned int	online_maps;		/* Number of online per-CPU maps */
	struct cpumap __percpu *maps;		/* Per-CPU allocation maps */
	unsigned long	scratch_map[IRQ_MATRIX_SIZE];	/* Scratch area for bitmap searches */
	unsigned long	system_map[IRQ_MATRIX_SIZE];	/* System-wide reserved bits */
};
  33. #define CREATE_TRACE_POINTS
  34. #include <trace/events/irq_matrix.h>
  35. /**
  36. * irq_alloc_matrix - Allocate a irq_matrix structure and initialize it
  37. * @matrix_bits: Number of matrix bits must be <= IRQ_MATRIX_BITS
  38. * @alloc_start: From which bit the allocation search starts
  39. * @alloc_end: At which bit the allocation search ends, i.e first
  40. * invalid bit
  41. */
  42. __init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
  43. unsigned int alloc_start,
  44. unsigned int alloc_end)
  45. {
  46. struct irq_matrix *m;
  47. if (matrix_bits > IRQ_MATRIX_BITS)
  48. return NULL;
  49. m = kzalloc(sizeof(*m), GFP_KERNEL);
  50. if (!m)
  51. return NULL;
  52. m->matrix_bits = matrix_bits;
  53. m->alloc_start = alloc_start;
  54. m->alloc_end = alloc_end;
  55. m->alloc_size = alloc_end - alloc_start;
  56. m->maps = alloc_percpu(*m->maps);
  57. if (!m->maps) {
  58. kfree(m);
  59. return NULL;
  60. }
  61. return m;
  62. }
  63. /**
  64. * irq_matrix_online - Bring the local CPU matrix online
  65. * @m: Matrix pointer
  66. */
  67. void irq_matrix_online(struct irq_matrix *m)
  68. {
  69. struct cpumap *cm = this_cpu_ptr(m->maps);
  70. BUG_ON(cm->online);
  71. if (!cm->initialized) {
  72. cm->available = m->alloc_size;
  73. cm->available -= cm->managed + m->systembits_inalloc;
  74. cm->initialized = true;
  75. }
  76. m->global_available += cm->available;
  77. cm->online = true;
  78. m->online_maps++;
  79. trace_irq_matrix_online(m);
  80. }
  81. /**
  82. * irq_matrix_offline - Bring the local CPU matrix offline
  83. * @m: Matrix pointer
  84. */
  85. void irq_matrix_offline(struct irq_matrix *m)
  86. {
  87. struct cpumap *cm = this_cpu_ptr(m->maps);
  88. /* Update the global available size */
  89. m->global_available -= cm->available;
  90. cm->online = false;
  91. m->online_maps--;
  92. trace_irq_matrix_offline(m);
  93. }
  94. static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
  95. unsigned int num, bool managed)
  96. {
  97. unsigned int area, start = m->alloc_start;
  98. unsigned int end = m->alloc_end;
  99. bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
  100. bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
  101. area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
  102. if (area >= end)
  103. return area;
  104. if (managed)
  105. bitmap_set(cm->managed_map, area, num);
  106. else
  107. bitmap_set(cm->alloc_map, area, num);
  108. return area;
  109. }
  110. /**
  111. * irq_matrix_assign_system - Assign system wide entry in the matrix
  112. * @m: Matrix pointer
  113. * @bit: Which bit to reserve
  114. * @replace: Replace an already allocated vector with a system
  115. * vector at the same bit position.
  116. *
  117. * The BUG_ON()s below are on purpose. If this goes wrong in the
  118. * early boot process, then the chance to survive is about zero.
  119. * If this happens when the system is life, it's not much better.
  120. */
  121. void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
  122. bool replace)
  123. {
  124. struct cpumap *cm = this_cpu_ptr(m->maps);
  125. BUG_ON(bit > m->matrix_bits);
  126. BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));
  127. set_bit(bit, m->system_map);
  128. if (replace) {
  129. BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
  130. cm->allocated--;
  131. m->total_allocated--;
  132. }
  133. if (bit >= m->alloc_start && bit < m->alloc_end)
  134. m->systembits_inalloc++;
  135. trace_irq_matrix_assign_system(bit, m);
  136. }
/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be reserved.
 *
 * Can be called for offline CPUs. Note, this will only reserve one bit
 * on all CPUs in @msk, but it's not guaranteed that the bits are at the
 * same offset on all CPUs
 *
 * Returns 0 on success, -ENOSPC when any CPU in @msk has no free bit
 * (all reservations made so far are rolled back in that case).
 */
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu, failed_cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		/* Grab a free bit and park it in the managed map */
		bit = matrix_alloc_area(m, cm, 1, true);
		if (bit >= m->alloc_end)
			goto cleanup;
		cm->managed++;
		/* Offline CPUs are accounted when they come online */
		if (cm->online) {
			cm->available--;
			m->global_available--;
		}
		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
	}
	return 0;
cleanup:
	/* Undo the reservations on all CPUs handled before the failed one */
	failed_cpu = cpu;
	for_each_cpu(cpu, msk) {
		if (cpu == failed_cpu)
			break;
		irq_matrix_remove_managed(m, cpumask_of(cpu));
	}
	return -ENOSPC;
}
  172. /**
  173. * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
  174. * @m: Matrix pointer
  175. * @msk: On which CPUs the bits should be removed
  176. *
  177. * Can be called for offline CPUs
  178. *
  179. * This removes not allocated managed interrupts from the map. It does
  180. * not matter which one because the managed interrupts free their
  181. * allocation when they shut down. If not, the accounting is screwed,
  182. * but all what can be done at this point is warn about it.
  183. */
  184. void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
  185. {
  186. unsigned int cpu;
  187. for_each_cpu(cpu, msk) {
  188. struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
  189. unsigned int bit, end = m->alloc_end;
  190. if (WARN_ON_ONCE(!cm->managed))
  191. continue;
  192. /* Get managed bit which are not allocated */
  193. bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
  194. bit = find_first_bit(m->scratch_map, end);
  195. if (WARN_ON_ONCE(bit >= end))
  196. continue;
  197. clear_bit(bit, cm->managed_map);
  198. cm->managed--;
  199. if (cm->online) {
  200. cm->available++;
  201. m->global_available++;
  202. }
  203. trace_irq_matrix_remove_managed(bit, cpu, m, cm);
  204. }
  205. }
  206. /**
  207. * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
  208. * @m: Matrix pointer
  209. * @cpu: On which CPU the interrupt should be allocated
  210. */
  211. int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
  212. {
  213. struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
  214. unsigned int bit, end = m->alloc_end;
  215. /* Get managed bit which are not allocated */
  216. bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
  217. bit = find_first_bit(m->scratch_map, end);
  218. if (bit >= end)
  219. return -ENOSPC;
  220. set_bit(bit, cm->alloc_map);
  221. cm->allocated++;
  222. m->total_allocated++;
  223. trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
  224. return bit;
  225. }
  226. /**
  227. * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
  228. * @m: Matrix pointer
  229. * @bit: Which bit to mark
  230. *
  231. * This should only be used to mark preallocated vectors
  232. */
  233. void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
  234. {
  235. struct cpumap *cm = this_cpu_ptr(m->maps);
  236. if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
  237. return;
  238. if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
  239. return;
  240. cm->allocated++;
  241. m->total_allocated++;
  242. cm->available--;
  243. m->global_available--;
  244. trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
  245. }
  246. /**
  247. * irq_matrix_reserve - Reserve interrupts
  248. * @m: Matrix pointer
  249. *
  250. * This is merily a book keeping call. It increments the number of globally
  251. * reserved interrupt bits w/o actually allocating them. This allows to
  252. * setup interrupt descriptors w/o assigning low level resources to it.
  253. * The actual allocation happens when the interrupt gets activated.
  254. */
  255. void irq_matrix_reserve(struct irq_matrix *m)
  256. {
  257. if (m->global_reserved <= m->global_available &&
  258. m->global_reserved + 1 > m->global_available)
  259. pr_warn("Interrupt reservation exceeds available resources\n");
  260. m->global_reserved++;
  261. trace_irq_matrix_reserve(m);
  262. }
  263. /**
  264. * irq_matrix_remove_reserved - Remove interrupt reservation
  265. * @m: Matrix pointer
  266. *
  267. * This is merily a book keeping call. It decrements the number of globally
  268. * reserved interrupt bits. This is used to undo irq_matrix_reserve() when the
  269. * interrupt was never in use and a real vector allocated, which undid the
  270. * reservation.
  271. */
  272. void irq_matrix_remove_reserved(struct irq_matrix *m)
  273. {
  274. m->global_reserved--;
  275. trace_irq_matrix_remove_reserved(m);
  276. }
  277. /**
  278. * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
  279. * @m: Matrix pointer
  280. * @msk: Which CPUs to search in
  281. * @reserved: Allocate previously reserved interrupts
  282. * @mapped_cpu: Pointer to store the CPU for which the irq was allocated
  283. */
  284. int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
  285. bool reserved, unsigned int *mapped_cpu)
  286. {
  287. unsigned int cpu, best_cpu, maxavl = 0;
  288. struct cpumap *cm;
  289. unsigned int bit;
  290. best_cpu = UINT_MAX;
  291. for_each_cpu(cpu, msk) {
  292. cm = per_cpu_ptr(m->maps, cpu);
  293. if (!cm->online || cm->available <= maxavl)
  294. continue;
  295. best_cpu = cpu;
  296. maxavl = cm->available;
  297. }
  298. if (maxavl) {
  299. cm = per_cpu_ptr(m->maps, best_cpu);
  300. bit = matrix_alloc_area(m, cm, 1, false);
  301. if (bit < m->alloc_end) {
  302. cm->allocated++;
  303. cm->available--;
  304. m->total_allocated++;
  305. m->global_available--;
  306. if (reserved)
  307. m->global_reserved--;
  308. *mapped_cpu = best_cpu;
  309. trace_irq_matrix_alloc(bit, best_cpu, m, cm);
  310. return bit;
  311. }
  312. }
  313. return -ENOSPC;
  314. }
  315. /**
  316. * irq_matrix_free - Free allocated interrupt in the matrix
  317. * @m: Matrix pointer
  318. * @cpu: Which CPU map needs be updated
  319. * @bit: The bit to remove
  320. * @managed: If true, the interrupt is managed and not accounted
  321. * as available.
  322. */
  323. void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
  324. unsigned int bit, bool managed)
  325. {
  326. struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
  327. if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
  328. return;
  329. clear_bit(bit, cm->alloc_map);
  330. cm->allocated--;
  331. if (cm->online)
  332. m->total_allocated--;
  333. if (!managed) {
  334. cm->available++;
  335. if (cm->online)
  336. m->global_available++;
  337. }
  338. trace_irq_matrix_free(bit, cpu, m, cm);
  339. }
  340. /**
  341. * irq_matrix_available - Get the number of globally available irqs
  342. * @m: Pointer to the matrix to query
  343. * @cpudown: If true, the local CPU is about to go down, adjust
  344. * the number of available irqs accordingly
  345. */
  346. unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
  347. {
  348. struct cpumap *cm = this_cpu_ptr(m->maps);
  349. if (!cpudown)
  350. return m->global_available;
  351. return m->global_available - cm->available;
  352. }
  353. /**
  354. * irq_matrix_reserved - Get the number of globally reserved irqs
  355. * @m: Pointer to the matrix to query
  356. */
  357. unsigned int irq_matrix_reserved(struct irq_matrix *m)
  358. {
  359. return m->global_reserved;
  360. }
  361. /**
  362. * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
  363. * @m: Pointer to the matrix to search
  364. *
  365. * This returns number of allocated irqs
  366. */
  367. unsigned int irq_matrix_allocated(struct irq_matrix *m)
  368. {
  369. struct cpumap *cm = this_cpu_ptr(m->maps);
  370. return cm->allocated;
  371. }
  372. #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf:		Pointer to the seq_file to print to
 * @m:		Pointer to the matrix allocator
 * @ind:	Indentation for the print format
 *
 * Note, this is a lockless snapshot.
 */
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
	/* Number of system-wide reserved bits */
	unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
	int cpu;

	/* Global counters first */
	seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps);
	seq_printf(sf, "Global available: %6u\n", m->global_available);
	seq_printf(sf, "Global reserved: %6u\n", m->global_reserved);
	seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
		   m->system_map);
	/* Per-CPU table; hold the hotplug lock so maps do not vanish */
	seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

		seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
			   cpu, cm->available, cm->managed, cm->allocated,
			   m->matrix_bits, cm->alloc_map);
	}
	cpus_read_unlock();
}
  401. #endif