  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/smp.h>
  3. #include <linux/cpu.h>
  4. #include <linux/slab.h>
  5. #include <linux/cpumask.h>
  6. #include <linux/percpu.h>
  7. #include <xen/events.h>
  8. #include <xen/hvc-console.h>
  9. #include "xen-ops.h"
  10. #include "smp.h"
/*
 * Per-cpu bookkeeping for each Xen inter-processor interrupt source:
 * the bound irq number (-1 while unbound) and the kasprintf()-allocated
 * name handed to the irq layer (freed again in xen_smp_intr_free()).
 */
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

/* Handlers are defined at the bottom of this file but bound in
 * xen_smp_intr_init(), hence the forward declarations. */
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
/*
 * Reschedule call back.  Account the IPI in the per-cpu irq statistics
 * and hand off to the scheduler core.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}
  26. void xen_smp_intr_free(unsigned int cpu)
  27. {
  28. if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
  29. unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
  30. per_cpu(xen_resched_irq, cpu).irq = -1;
  31. kfree(per_cpu(xen_resched_irq, cpu).name);
  32. per_cpu(xen_resched_irq, cpu).name = NULL;
  33. }
  34. if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
  35. unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
  36. per_cpu(xen_callfunc_irq, cpu).irq = -1;
  37. kfree(per_cpu(xen_callfunc_irq, cpu).name);
  38. per_cpu(xen_callfunc_irq, cpu).name = NULL;
  39. }
  40. if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
  41. unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
  42. per_cpu(xen_debug_irq, cpu).irq = -1;
  43. kfree(per_cpu(xen_debug_irq, cpu).name);
  44. per_cpu(xen_debug_irq, cpu).name = NULL;
  45. }
  46. if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
  47. unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
  48. NULL);
  49. per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
  50. kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
  51. per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
  52. }
  53. }
  54. int xen_smp_intr_init(unsigned int cpu)
  55. {
  56. int rc;
  57. char *resched_name, *callfunc_name, *debug_name;
  58. resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
  59. rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
  60. cpu,
  61. xen_reschedule_interrupt,
  62. IRQF_PERCPU|IRQF_NOBALANCING,
  63. resched_name,
  64. NULL);
  65. if (rc < 0)
  66. goto fail;
  67. per_cpu(xen_resched_irq, cpu).irq = rc;
  68. per_cpu(xen_resched_irq, cpu).name = resched_name;
  69. callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
  70. rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
  71. cpu,
  72. xen_call_function_interrupt,
  73. IRQF_PERCPU|IRQF_NOBALANCING,
  74. callfunc_name,
  75. NULL);
  76. if (rc < 0)
  77. goto fail;
  78. per_cpu(xen_callfunc_irq, cpu).irq = rc;
  79. per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
  80. debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
  81. rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
  82. IRQF_PERCPU | IRQF_NOBALANCING,
  83. debug_name, NULL);
  84. if (rc < 0)
  85. goto fail;
  86. per_cpu(xen_debug_irq, cpu).irq = rc;
  87. per_cpu(xen_debug_irq, cpu).name = debug_name;
  88. callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
  89. rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
  90. cpu,
  91. xen_call_function_single_interrupt,
  92. IRQF_PERCPU|IRQF_NOBALANCING,
  93. callfunc_name,
  94. NULL);
  95. if (rc < 0)
  96. goto fail;
  97. per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
  98. per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
  99. return 0;
  100. fail:
  101. xen_smp_intr_free(cpu);
  102. return rc;
  103. }
/*
 * Finalise SMP bringup.  On HVM domains defer to the native code path,
 * otherwise recompute the logical package count ourselves.
 *
 * When vcpu_info placement is unavailable, CPUs whose Xen vCPU number is
 * >= MAX_VIRT_CPUS cannot be supported (presumably they have no slot in
 * the shared info page -- NOTE(review): confirm against xen_vcpu_setup)
 * and are forced back offline here, with their vcpu_info reset so they
 * cannot be onlined again.
 */
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	int cpu, rc, count = 0;

	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);
	else
		calculate_max_logical_packages();

	/* With vcpu_info placement every cpu is usable -- nothing to prune. */
	if (xen_have_vcpu_info_placement)
		return;

	for_each_online_cpu(cpu) {
		if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
			continue;

		rc = cpu_down(cpu);

		if (rc == 0) {
			/*
			 * Reset vcpu_info so this cpu cannot be onlined again.
			 */
			xen_vcpu_info_reset(cpu);
			count++;
		} else {
			pr_warn("%s: failed to bring CPU %d down, error %d\n",
				__func__, cpu, rc);
		}
	}
	/* Any pruned cpu is worth a loud warning: the system lost capacity. */
	WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
}
/* Send the reschedule IPI to a single cpu via xen_send_IPI_one(). */
void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
  134. static void __xen_send_IPI_mask(const struct cpumask *mask,
  135. int vector)
  136. {
  137. unsigned cpu;
  138. for_each_cpu_and(cpu, mask, cpu_online_mask)
  139. xen_send_IPI_one(cpu, vector);
  140. }
  141. void xen_smp_send_call_function_ipi(const struct cpumask *mask)
  142. {
  143. int cpu;
  144. __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
  145. /* Make sure other vcpus get a chance to run if they need to. */
  146. for_each_cpu(cpu, mask) {
  147. if (xen_vcpu_stolen(cpu)) {
  148. HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
  149. break;
  150. }
  151. }
  152. }
/* Send the single-target call-function IPI to exactly one cpu. */
void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
  158. static inline int xen_map_vector(int vector)
  159. {
  160. int xen_vector;
  161. switch (vector) {
  162. case RESCHEDULE_VECTOR:
  163. xen_vector = XEN_RESCHEDULE_VECTOR;
  164. break;
  165. case CALL_FUNCTION_VECTOR:
  166. xen_vector = XEN_CALL_FUNCTION_VECTOR;
  167. break;
  168. case CALL_FUNCTION_SINGLE_VECTOR:
  169. xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
  170. break;
  171. case IRQ_WORK_VECTOR:
  172. xen_vector = XEN_IRQ_WORK_VECTOR;
  173. break;
  174. #ifdef CONFIG_X86_64
  175. case NMI_VECTOR:
  176. case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
  177. xen_vector = XEN_NMI_VECTOR;
  178. break;
  179. #endif
  180. default:
  181. xen_vector = -1;
  182. printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
  183. vector);
  184. }
  185. return xen_vector;
  186. }
  187. void xen_send_IPI_mask(const struct cpumask *mask,
  188. int vector)
  189. {
  190. int xen_vector = xen_map_vector(vector);
  191. if (xen_vector >= 0)
  192. __xen_send_IPI_mask(mask, xen_vector);
  193. }
  194. void xen_send_IPI_all(int vector)
  195. {
  196. int xen_vector = xen_map_vector(vector);
  197. if (xen_vector >= 0)
  198. __xen_send_IPI_mask(cpu_online_mask, xen_vector);
  199. }
  200. void xen_send_IPI_self(int vector)
  201. {
  202. int xen_vector = xen_map_vector(vector);
  203. if (xen_vector >= 0)
  204. xen_send_IPI_one(smp_processor_id(), xen_vector);
  205. }
  206. void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
  207. int vector)
  208. {
  209. unsigned cpu;
  210. unsigned int this_cpu = smp_processor_id();
  211. int xen_vector = xen_map_vector(vector);
  212. if (!(num_online_cpus() > 1) || (xen_vector < 0))
  213. return;
  214. for_each_cpu_and(cpu, mask, cpu_online_mask) {
  215. if (this_cpu == cpu)
  216. continue;
  217. xen_send_IPI_one(cpu, xen_vector);
  218. }
  219. }
/* Send @vector to every online cpu except the calling one. */
void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}
/*
 * Handler for the call-function IPI: run the generic smp_call_function
 * machinery inside an explicit irq_enter()/irq_exit() bracket and count
 * the interrupt in the per-cpu stats.
 */
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}
/*
 * Handler for the single-target call-function IPI: same shape as
 * xen_call_function_interrupt() but drains the single-cpu queue.
 */
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}