/* smp.c */

#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>
#include <xen/hvc-console.h>

#include "xen-ops.h"
#include "smp.h"
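
/*
 * Per-CPU bookkeeping for the IPI and VIRQ bindings below: each slot
 * records the Linux irq number returned by the event-channel binding
 * (-1 while unbound) together with the kasprintf()-allocated name used
 * for the binding, so that xen_smp_intr_free() can unbind the handler
 * and free the name later.
 */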
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}
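
/*
 * Unbind the per-CPU IPI and VIRQ handlers set up by xen_smp_intr_init()
 * and free their names.  Each slot is checked individually, so this is
 * safe to call for a partially initialized CPU; it doubles as the error
 * unwind path for xen_smp_intr_init().
 */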
void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
}
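
/*
 * Bind this CPU's IPI vectors (reschedule, call-function,
 * call-function-single) and the VIRQ_DEBUG virq to Xen event channels
 * and install their handlers.  Any failure unwinds everything bound so
 * far via xen_smp_intr_free().
 */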
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu).irq = rc;
	per_cpu(xen_debug_irq, cpu).name = debug_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}
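
/*
 * A minimal sketch of how a caller might drive the pair above from a
 * CPU hotplug path.  The callback names here are hypothetical, for
 * illustration only; the real callers live in the PV/HVM-specific SMP
 * bring-up code, not in this file.
 *
 *	static int example_cpu_up_prepare(unsigned int cpu)
 *	{
 *		return xen_smp_intr_init(cpu);	// bind per-CPU IRQs
 *	}
 *
 *	static int example_cpu_dead(unsigned int cpu)
 *	{
 *		xen_smp_intr_free(cpu);		// unbind and free names
 *		return 0;
 *	}
 */
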
void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
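
/*
 * Deliver @vector to every online CPU in @mask, one event-channel
 * notification per target CPU.
 */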
static void __xen_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}
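
/*
 * Send the call-function IPI to @mask.  If any target vCPU is currently
 * preempted ("stolen") by the hypervisor, yield this vCPU so the target
 * gets a chance to run and notice the pending call.
 */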
void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
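
/*
 * Translate a native x86 IPI vector number into the corresponding Xen
 * IPI vector, or -1 if there is no mapping.
 */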
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI:	/* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}
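
/*
 * The senders below take native x86 vector numbers and translate them
 * through xen_map_vector() before sending; unmapped vectors are
 * dropped here, after xen_map_vector() has already logged the error.
 */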
void xen_send_IPI_mask(const struct cpumask *mask, int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}
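
/*
 * Send @vector to all online CPUs in @mask except the calling CPU.
 * With a single online CPU there is nobody else to signal, so bail out
 * early.
 */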
void xen_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}
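
/*
 * Handlers for the two call-function IPIs: dispatch the queued
 * cross-CPU function calls via the generic SMP helpers, bracketed by
 * irq_enter()/irq_exit() for correct preemption and RCU accounting.
 */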
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}