/*
 * kernel/irq/affinity.c - spread interrupt affinity across online CPUs
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
  5. static int get_first_sibling(unsigned int cpu)
  6. {
  7. unsigned int ret;
  8. ret = cpumask_first(topology_sibling_cpumask(cpu));
  9. if (ret < nr_cpu_ids)
  10. return ret;
  11. return cpu;
  12. }
/*
 * Take a map of online CPUs and the number of available interrupt vectors
 * and generate an output cpumask suitable for spreading MSI/MSI-X vectors
 * so that they are distributed as well as possible around the CPUs.  If
 * more vectors than CPUs are available we'll map one to each CPU,
 * otherwise we map one to the first sibling of each core.
 *
 * If there are more vectors than CPUs we will still only have one bit
 * set per CPU, but interrupt code will keep on assigning the vectors from
 * the start of the bitmap until we run out of vectors.
 */
  24. struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
  25. {
  26. struct cpumask *affinity_mask;
  27. unsigned int max_vecs = *nr_vecs;
  28. if (max_vecs == 1)
  29. return NULL;
  30. affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL);
  31. if (!affinity_mask) {
  32. *nr_vecs = 1;
  33. return NULL;
  34. }
  35. if (max_vecs >= num_online_cpus()) {
  36. cpumask_copy(affinity_mask, cpu_online_mask);
  37. *nr_vecs = num_online_cpus();
  38. } else {
  39. unsigned int vecs = 0, cpu;
  40. for_each_online_cpu(cpu) {
  41. if (cpu == get_first_sibling(cpu)) {
  42. cpumask_set_cpu(cpu, affinity_mask);
  43. vecs++;
  44. }
  45. if (--max_vecs == 0)
  46. break;
  47. }
  48. *nr_vecs = vecs;
  49. }
  50. return affinity_mask;
  51. }