matrix.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478
  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>
  3. #include <linux/spinlock.h>
  4. #include <linux/seq_file.h>
  5. #include <linux/bitmap.h>
  6. #include <linux/percpu.h>
  7. #include <linux/cpu.h>
  8. #include <linux/irq.h>
  9. #define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS))
/* Per-CPU view of the matrix: allocation state for one CPU's vector space */
struct cpumap {
	unsigned int	available;	/* Vectors still allocatable on this CPU */
	unsigned int	allocated;	/* Vectors currently allocated on this CPU */
	unsigned int	managed;	/* Bits reserved in managed_map */
	bool		initialized;	/* available was seeded on first online */
	bool		online;		/* CPU participates in allocation */
	unsigned long	alloc_map[IRQ_MATRIX_SIZE];	/* Allocated bits */
	unsigned long	managed_map[IRQ_MATRIX_SIZE];	/* Managed (reserved) bits */
};
/* Global matrix allocator state plus the per-CPU maps */
struct irq_matrix {
	unsigned int	matrix_bits;		/* Valid bits, <= IRQ_MATRIX_BITS */
	unsigned int	alloc_start;		/* First bit of the search range */
	unsigned int	alloc_end;		/* First invalid bit (exclusive end) */
	unsigned int	alloc_size;		/* alloc_end - alloc_start */
	unsigned int	global_available;	/* Sum of available over online maps */
	unsigned int	global_reserved;	/* Bookkeeping-only reservations */
	unsigned int	systembits_inalloc;	/* System bits inside the alloc range */
	unsigned int	total_allocated;	/* Allocations across online CPUs */
	unsigned int	online_maps;		/* Number of online per-CPU maps */
	struct cpumap __percpu *maps;		/* The per-CPU maps */
	unsigned long	scratch_map[IRQ_MATRIX_SIZE];	/* Scratch for bitmap ops */
	unsigned long	system_map[IRQ_MATRIX_SIZE];	/* System-wide reserved bits */
};
  33. #define CREATE_TRACE_POINTS
  34. #include <trace/events/irq_matrix.h>
  35. /**
  36. * irq_alloc_matrix - Allocate a irq_matrix structure and initialize it
  37. * @matrix_bits: Number of matrix bits must be <= IRQ_MATRIX_BITS
  38. * @alloc_start: From which bit the allocation search starts
  39. * @alloc_end: At which bit the allocation search ends, i.e first
  40. * invalid bit
  41. */
  42. __init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
  43. unsigned int alloc_start,
  44. unsigned int alloc_end)
  45. {
  46. struct irq_matrix *m;
  47. if (matrix_bits > IRQ_MATRIX_BITS)
  48. return NULL;
  49. m = kzalloc(sizeof(*m), GFP_KERNEL);
  50. if (!m)
  51. return NULL;
  52. m->matrix_bits = matrix_bits;
  53. m->alloc_start = alloc_start;
  54. m->alloc_end = alloc_end;
  55. m->alloc_size = alloc_end - alloc_start;
  56. m->maps = alloc_percpu(*m->maps);
  57. if (!m->maps) {
  58. kfree(m);
  59. return NULL;
  60. }
  61. return m;
  62. }
  63. /**
  64. * irq_matrix_online - Bring the local CPU matrix online
  65. * @m: Matrix pointer
  66. */
  67. void irq_matrix_online(struct irq_matrix *m)
  68. {
  69. struct cpumap *cm = this_cpu_ptr(m->maps);
  70. BUG_ON(cm->online);
  71. if (!cm->initialized) {
  72. cm->available = m->alloc_size;
  73. cm->available -= cm->managed + m->systembits_inalloc;
  74. cm->initialized = true;
  75. }
  76. m->global_available += cm->available;
  77. cm->online = true;
  78. m->online_maps++;
  79. trace_irq_matrix_online(m);
  80. }
  81. /**
  82. * irq_matrix_offline - Bring the local CPU matrix offline
  83. * @m: Matrix pointer
  84. */
  85. void irq_matrix_offline(struct irq_matrix *m)
  86. {
  87. struct cpumap *cm = this_cpu_ptr(m->maps);
  88. /* Update the global available size */
  89. m->global_available -= cm->available;
  90. cm->online = false;
  91. m->online_maps--;
  92. trace_irq_matrix_offline(m);
  93. }
  94. static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
  95. unsigned int num, bool managed)
  96. {
  97. unsigned int area, start = m->alloc_start;
  98. unsigned int end = m->alloc_end;
  99. bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
  100. bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
  101. area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
  102. if (area >= end)
  103. return area;
  104. if (managed)
  105. bitmap_set(cm->managed_map, area, num);
  106. else
  107. bitmap_set(cm->alloc_map, area, num);
  108. return area;
  109. }
  110. /* Find the best CPU which has the lowest vector allocation count */
  111. static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
  112. const struct cpumask *msk)
  113. {
  114. unsigned int cpu, best_cpu, maxavl = 0;
  115. struct cpumap *cm;
  116. best_cpu = UINT_MAX;
  117. for_each_cpu(cpu, msk) {
  118. cm = per_cpu_ptr(m->maps, cpu);
  119. if (!cm->online || cm->available <= maxavl)
  120. continue;
  121. best_cpu = cpu;
  122. maxavl = cm->available;
  123. }
  124. return best_cpu;
  125. }
  126. /**
  127. * irq_matrix_assign_system - Assign system wide entry in the matrix
  128. * @m: Matrix pointer
  129. * @bit: Which bit to reserve
  130. * @replace: Replace an already allocated vector with a system
  131. * vector at the same bit position.
  132. *
  133. * The BUG_ON()s below are on purpose. If this goes wrong in the
  134. * early boot process, then the chance to survive is about zero.
  135. * If this happens when the system is life, it's not much better.
  136. */
  137. void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
  138. bool replace)
  139. {
  140. struct cpumap *cm = this_cpu_ptr(m->maps);
  141. BUG_ON(bit > m->matrix_bits);
  142. BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));
  143. set_bit(bit, m->system_map);
  144. if (replace) {
  145. BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
  146. cm->allocated--;
  147. m->total_allocated--;
  148. }
  149. if (bit >= m->alloc_start && bit < m->alloc_end)
  150. m->systembits_inalloc++;
  151. trace_irq_matrix_assign_system(bit, m);
  152. }
/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be reserved.
 *
 * Can be called for offline CPUs. Note, this will only reserve one bit
 * on all CPUs in @msk, but it's not guaranteed that the bits are at the
 * same offset on all CPUs
 *
 * Returns 0 on success, -ENOSPC when a bit could not be reserved on some
 * CPU in @msk; in that case all reservations made here are rolled back.
 */
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu, failed_cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		bit = matrix_alloc_area(m, cm, 1, true);
		if (bit >= m->alloc_end)
			goto cleanup;
		cm->managed++;
		/* Available accounting only applies while the CPU is online */
		if (cm->online) {
			cm->available--;
			m->global_available--;
		}
		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
	}
	return 0;
cleanup:
	/* Undo the reservations on the CPUs processed before the failure */
	failed_cpu = cpu;
	for_each_cpu(cpu, msk) {
		if (cpu == failed_cpu)
			break;
		irq_matrix_remove_managed(m, cpumask_of(cpu));
	}
	return -ENOSPC;
}
/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs
 *
 * This removes not allocated managed interrupts from the map. It does
 * not matter which one because the managed interrupts free their
 * allocation when they shut down. If not, the accounting is screwed,
 * but all what can be done at this point is warn about it.
 */
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit, end = m->alloc_end;

		/* Removing more than was reserved is an accounting bug */
		if (WARN_ON_ONCE(!cm->managed))
			continue;

		/* Get managed bit which are not allocated */
		bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

		bit = find_first_bit(m->scratch_map, end);
		/* All managed bits still allocated: see comment above */
		if (WARN_ON_ONCE(bit >= end))
			continue;

		clear_bit(bit, cm->managed_map);
		cm->managed--;
		/* Available accounting only applies while the CPU is online */
		if (cm->online) {
			cm->available++;
			m->global_available++;
		}
		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
	}
}
  222. /**
  223. * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
  224. * @m: Matrix pointer
  225. * @cpu: On which CPU the interrupt should be allocated
  226. */
  227. int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
  228. unsigned int *mapped_cpu)
  229. {
  230. unsigned int bit, cpu, end = m->alloc_end;
  231. struct cpumap *cm;
  232. if (cpumask_empty(msk))
  233. return -EINVAL;
  234. cpu = matrix_find_best_cpu(m, msk);
  235. if (cpu == UINT_MAX)
  236. return -ENOSPC;
  237. cm = per_cpu_ptr(m->maps, cpu);
  238. end = m->alloc_end;
  239. /* Get managed bit which are not allocated */
  240. bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
  241. bit = find_first_bit(m->scratch_map, end);
  242. if (bit >= end)
  243. return -ENOSPC;
  244. set_bit(bit, cm->alloc_map);
  245. cm->allocated++;
  246. m->total_allocated++;
  247. *mapped_cpu = cpu;
  248. trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
  249. return bit;
  250. }
  251. /**
  252. * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
  253. * @m: Matrix pointer
  254. * @bit: Which bit to mark
  255. *
  256. * This should only be used to mark preallocated vectors
  257. */
  258. void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
  259. {
  260. struct cpumap *cm = this_cpu_ptr(m->maps);
  261. if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
  262. return;
  263. if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
  264. return;
  265. cm->allocated++;
  266. m->total_allocated++;
  267. cm->available--;
  268. m->global_available--;
  269. trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
  270. }
/**
 * irq_matrix_reserve - Reserve interrupts
 * @m:		Matrix pointer
 *
 * This is merely a book keeping call. It increments the number of globally
 * reserved interrupt bits w/o actually allocating them. This allows to
 * setup interrupt descriptors w/o assigning low level resources to it.
 * The actual allocation happens when the interrupt gets activated.
 */
void irq_matrix_reserve(struct irq_matrix *m)
{
	/*
	 * Warn exactly when this reservation pushes the reserved count
	 * past the number of available vectors (the two-part comparison
	 * fires only for reserved == available).
	 */
	if (m->global_reserved <= m->global_available &&
	    m->global_reserved + 1 > m->global_available)
		pr_warn("Interrupt reservation exceeds available resources\n");

	m->global_reserved++;
	trace_irq_matrix_reserve(m);
}
/**
 * irq_matrix_remove_reserved - Remove interrupt reservation
 * @m:		Matrix pointer
 *
 * This is merely a book keeping call. It decrements the number of globally
 * reserved interrupt bits. This is used to undo irq_matrix_reserve() when the
 * interrupt was never in use and a real vector allocated, which undid the
 * reservation.
 */
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
	m->global_reserved--;
	trace_irq_matrix_remove_reserved(m);
}
  302. /**
  303. * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
  304. * @m: Matrix pointer
  305. * @msk: Which CPUs to search in
  306. * @reserved: Allocate previously reserved interrupts
  307. * @mapped_cpu: Pointer to store the CPU for which the irq was allocated
  308. */
  309. int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
  310. bool reserved, unsigned int *mapped_cpu)
  311. {
  312. unsigned int cpu, bit;
  313. struct cpumap *cm;
  314. cpu = matrix_find_best_cpu(m, msk);
  315. if (cpu == UINT_MAX)
  316. return -ENOSPC;
  317. cm = per_cpu_ptr(m->maps, cpu);
  318. bit = matrix_alloc_area(m, cm, 1, false);
  319. if (bit >= m->alloc_end)
  320. return -ENOSPC;
  321. cm->allocated++;
  322. cm->available--;
  323. m->total_allocated++;
  324. m->global_available--;
  325. if (reserved)
  326. m->global_reserved--;
  327. *mapped_cpu = cpu;
  328. trace_irq_matrix_alloc(bit, cpu, m, cm);
  329. return bit;
  330. }
  331. /**
  332. * irq_matrix_free - Free allocated interrupt in the matrix
  333. * @m: Matrix pointer
  334. * @cpu: Which CPU map needs be updated
  335. * @bit: The bit to remove
  336. * @managed: If true, the interrupt is managed and not accounted
  337. * as available.
  338. */
  339. void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
  340. unsigned int bit, bool managed)
  341. {
  342. struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
  343. if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
  344. return;
  345. clear_bit(bit, cm->alloc_map);
  346. cm->allocated--;
  347. if (cm->online)
  348. m->total_allocated--;
  349. if (!managed) {
  350. cm->available++;
  351. if (cm->online)
  352. m->global_available++;
  353. }
  354. trace_irq_matrix_free(bit, cpu, m, cm);
  355. }
  356. /**
  357. * irq_matrix_available - Get the number of globally available irqs
  358. * @m: Pointer to the matrix to query
  359. * @cpudown: If true, the local CPU is about to go down, adjust
  360. * the number of available irqs accordingly
  361. */
  362. unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
  363. {
  364. struct cpumap *cm = this_cpu_ptr(m->maps);
  365. if (!cpudown)
  366. return m->global_available;
  367. return m->global_available - cm->available;
  368. }
/**
 * irq_matrix_reserved - Get the number of globally reserved irqs
 * @m:		Pointer to the matrix to query
 */
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
	return m->global_reserved;
}
/**
 * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
 * @m:		Pointer to the matrix to search
 *
 * This returns number of allocated irqs
 */
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	return cm->allocated;
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf:		Pointer to the seq_file to print to
 * @m:		Pointer to the matrix allocator
 * @ind:	Indentation for the print format
 *
 * Note, this is a lockless snapshot.
 */
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
	/* Number of system wide reserved bits */
	unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
	int cpu;

	seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps);
	seq_printf(sf, "Global available: %6u\n", m->global_available);
	seq_printf(sf, "Global reserved: %6u\n", m->global_reserved);
	seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
		   m->system_map);
	seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
	/* Prevent CPU hotplug while iterating the online CPUs */
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

		seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
			   cpu, cm->available, cm->managed, cm->allocated,
			   m->matrix_bits, cm->alloc_map);
	}
	cpus_read_unlock();
}
#endif
  417. #endif