/*
 * ipi.c - default APIC inter-processor interrupt (IPI) helpers for x86
 */
  1. #include <linux/cpumask.h>
  2. #include <linux/interrupt.h>
  3. #include <linux/mm.h>
  4. #include <linux/delay.h>
  5. #include <linux/spinlock.h>
  6. #include <linux/kernel_stat.h>
  7. #include <linux/mc146818rtc.h>
  8. #include <linux/cache.h>
  9. #include <linux/cpu.h>
  10. #include <linux/module.h>
  11. #include <asm/smp.h>
  12. #include <asm/mtrr.h>
  13. #include <asm/tlbflush.h>
  14. #include <asm/mmu_context.h>
  15. #include <asm/apic.h>
  16. #include <asm/proto.h>
  17. #include <asm/ipi.h>
  18. void default_send_IPI_single_phys(int cpu, int vector)
  19. {
  20. unsigned long flags;
  21. local_irq_save(flags);
  22. __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
  23. vector, APIC_DEST_PHYSICAL);
  24. local_irq_restore(flags);
  25. }
  26. void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
  27. {
  28. unsigned long query_cpu;
  29. unsigned long flags;
  30. /*
  31. * Hack. The clustered APIC addressing mode doesn't allow us to send
  32. * to an arbitrary mask, so I do a unicast to each CPU instead.
  33. * - mbligh
  34. */
  35. local_irq_save(flags);
  36. for_each_cpu(query_cpu, mask) {
  37. __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
  38. query_cpu), vector, APIC_DEST_PHYSICAL);
  39. }
  40. local_irq_restore(flags);
  41. }
  42. void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
  43. int vector)
  44. {
  45. unsigned int this_cpu = smp_processor_id();
  46. unsigned int query_cpu;
  47. unsigned long flags;
  48. /* See Hack comment above */
  49. local_irq_save(flags);
  50. for_each_cpu(query_cpu, mask) {
  51. if (query_cpu == this_cpu)
  52. continue;
  53. __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
  54. query_cpu), vector, APIC_DEST_PHYSICAL);
  55. }
  56. local_irq_restore(flags);
  57. }
  58. /*
  59. * Helper function for APICs which insist on cpumasks
  60. */
  61. void default_send_IPI_single(int cpu, int vector)
  62. {
  63. apic->send_IPI_mask(cpumask_of(cpu), vector);
  64. }
  65. #ifdef CONFIG_X86_32
  66. void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
  67. int vector)
  68. {
  69. unsigned long flags;
  70. unsigned int query_cpu;
  71. /*
  72. * Hack. The clustered APIC addressing mode doesn't allow us to send
  73. * to an arbitrary mask, so I do a unicasts to each CPU instead. This
  74. * should be modified to do 1 message per cluster ID - mbligh
  75. */
  76. local_irq_save(flags);
  77. for_each_cpu(query_cpu, mask)
  78. __default_send_IPI_dest_field(
  79. early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
  80. vector, apic->dest_logical);
  81. local_irq_restore(flags);
  82. }
  83. void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
  84. int vector)
  85. {
  86. unsigned long flags;
  87. unsigned int query_cpu;
  88. unsigned int this_cpu = smp_processor_id();
  89. /* See Hack comment above */
  90. local_irq_save(flags);
  91. for_each_cpu(query_cpu, mask) {
  92. if (query_cpu == this_cpu)
  93. continue;
  94. __default_send_IPI_dest_field(
  95. early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
  96. vector, apic->dest_logical);
  97. }
  98. local_irq_restore(flags);
  99. }
  100. /*
  101. * This is only used on smaller machines.
  102. */
  103. void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
  104. {
  105. unsigned long mask = cpumask_bits(cpumask)[0];
  106. unsigned long flags;
  107. if (!mask)
  108. return;
  109. local_irq_save(flags);
  110. WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
  111. __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
  112. local_irq_restore(flags);
  113. }
  114. void default_send_IPI_allbutself(int vector)
  115. {
  116. /*
  117. * if there are no other CPUs in the system then we get an APIC send
  118. * error if we try to broadcast, thus avoid sending IPIs in this case.
  119. */
  120. if (!(num_online_cpus() > 1))
  121. return;
  122. __default_local_send_IPI_allbutself(vector);
  123. }
  124. void default_send_IPI_all(int vector)
  125. {
  126. __default_local_send_IPI_all(vector);
  127. }
  128. void default_send_IPI_self(int vector)
  129. {
  130. __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
  131. }
  132. /* must come after the send_IPI functions above for inlining */
  133. static int convert_apicid_to_cpu(int apic_id)
  134. {
  135. int i;
  136. for_each_possible_cpu(i) {
  137. if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
  138. return i;
  139. }
  140. return -1;
  141. }
  142. int safe_smp_processor_id(void)
  143. {
  144. int apicid, cpuid;
  145. if (!cpu_has_apic)
  146. return 0;
  147. apicid = hard_smp_processor_id();
  148. if (apicid == BAD_APICID)
  149. return 0;
  150. cpuid = convert_apicid_to_cpu(apicid);
  151. return cpuid >= 0 ? cpuid : 0;
  152. }
  153. #endif