smp.c

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>
#include <xen/hvc-console.h>

#include "xen-ops.h"
#include "smp.h"

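/*
 * Per-CPU bookkeeping for the interrupts bound below: struct
 * xen_common_irq (declared in smp.h) pairs the Linux irq number
 * returned by the bind call with the name string handed to the IRQ
 * core.  An .irq of -1 marks a slot that is currently unbound.
 */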
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

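/*
 * Undo xen_smp_intr_init(): unbind each interrupt that is currently
 * bound for @cpu, mark the slot free again with .irq = -1, and free
 * the name string that was allocated for it.
 */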
void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
}

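/*
 * Bind the reschedule, call-function and call-function-single IPIs
 * plus the VIRQ_DEBUG virtual interrupt for @cpu.  On any failure the
 * partially set up state is unwound through xen_smp_intr_free().
 */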
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu).irq = rc;
	per_cpu(xen_debug_irq, cpu).name = debug_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

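/*
 * Without vcpu_info placement, only the first MAX_VIRT_CPUS vCPUs
 * have vcpu_info slots in the shared-info page, so any online CPU
 * beyond that limit is brought back down here and its vcpu_info
 * reset so it cannot be onlined again.
 */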
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	int cpu, rc, count = 0;

	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);

	if (xen_have_vcpu_info_placement)
		return;

	for_each_online_cpu(cpu) {
		if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
			continue;

		rc = cpu_down(cpu);

		if (rc == 0) {
			/*
			 * Reset vcpu_info so this cpu cannot be onlined again.
			 */
			xen_vcpu_info_reset(cpu);
			count++;
		} else {
			pr_warn("%s: failed to bring CPU %d down, error %d\n",
				__func__, cpu, rc);
		}
	}
	WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
}

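/*
 * The XEN_*_VECTOR values below are not IDT vectors:
 * xen_send_IPI_one() delivers them through the per-CPU event
 * channels bound in xen_smp_intr_init().
 */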
void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

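/* Raise @vector on every CPU in @mask that is currently online. */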
static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

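/*
 * Send the call-function IPI and, if any target vCPU is currently
 * preempted by the hypervisor (its time is being "stolen"), yield so
 * it gets a chance to run and service the request sooner.
 */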
void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

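/*
 * Translate a native x86 IPI vector into its XEN_*_VECTOR
 * equivalent, or return -1 if there is none.
 */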
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

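/*
 * The xen_send_IPI_* helpers below are the Xen counterparts of the
 * native APIC send_IPI operations: each maps the native vector and
 * fans the event out to the requested set of online CPUs.
 */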
void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

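/*
 * Handlers for the two call-function IPIs: run the generic SMP
 * cross-call machinery inside an explicit irq_enter()/irq_exit()
 * section and account the event in irq_call_count.
 */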
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}
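
/*
 * A minimal sketch (not part of this file) of how these helpers are
 * typically wired up; the actual smp_ops assignment lives in the
 * PV/HVM specific code (e.g. smp_pv.c), and the exact member names
 * vary by kernel version:
 *
 *	static const struct smp_ops xen_smp_ops __initconst = {
 *		.smp_cpus_done		= xen_smp_cpus_done,
 *		.smp_send_reschedule	= xen_smp_send_reschedule,
 *		.send_call_func_ipi	= xen_smp_send_call_function_ipi,
 *		.send_call_func_single_ipi =
 *			xen_smp_send_call_function_single_ipi,
 *	};
 *
 * xen_smp_intr_init() and xen_smp_intr_free() are then invoked from
 * the CPU bringup and teardown paths for each CPU.
 */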