smp.c

/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_MAX
};

/* A collection of single bit ipi messages. */
static struct {
        unsigned long stats[IPI_MAX] ____cacheline_aligned;
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
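
/*
 * Note: the ____cacheline_aligned annotations keep each CPU's `bits` word,
 * which remote senders write, on a cache line of its own, away from the
 * locally updated `stats` counters, so cross-hart IPI traffic does not
 * false-share cache lines with this hart's hot local bookkeeping.
 */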

int riscv_hartid_to_cpuid(int hartid)
{
        int i = -1;

        for (i = 0; i < NR_CPUS; i++)
                if (cpuid_to_hartid_map(i) == hartid)
                        return i;

        pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
        BUG();
        return i;
}

void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
        int cpu;

        for_each_cpu(cpu, in)
                cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
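
/*
 * Why the indirection: hart IDs are assigned by the platform and need not
 * be contiguous or start at zero (the boot hart in particular may be any
 * hart), while Linux logical CPU ids always run 0..NR_CPUS-1.  A minimal
 * standalone sketch of the same translation, with a made-up sparse map
 * (the map values below are illustrative, not from any real platform):
 *
 *      #include <stdio.h>
 *
 *      #define NR_CPUS 4
 *      static const int cpuid_to_hartid[NR_CPUS] = { 3, 0, 5, 7 };
 *
 *      static int hartid_to_cpuid(int hartid)
 *      {
 *              for (int i = 0; i < NR_CPUS; i++)
 *                      if (cpuid_to_hartid[i] == hartid)
 *                              return i;
 *              return -1;
 *      }
 *
 *      int main(void)
 *      {
 *              printf("hart 5 -> cpu %d\n", hartid_to_cpuid(5));  // cpu 2
 *              return 0;
 *      }
 */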

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

void riscv_software_interrupt(void)
{
        unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
        unsigned long *stats = ipi_data[smp_processor_id()].stats;

        /* Clear pending IPI */
        csr_clear(sip, SIE_SSIE);

        while (true) {
                unsigned long ops;

                /* Order bit clearing and data access. */
                mb();

                ops = xchg(pending_ipis, 0);
                if (ops == 0)
                        return;

                if (ops & (1 << IPI_RESCHEDULE)) {
                        stats[IPI_RESCHEDULE]++;
                        scheduler_ipi();
                }

                if (ops & (1 << IPI_CALL_FUNC)) {
                        stats[IPI_CALL_FUNC]++;
                        generic_smp_call_function_interrupt();
                }

                BUG_ON((ops >> IPI_MAX) != 0);

                /* Order data access and bit testing. */
                mb();
        }
}
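
/*
 * A minimal userspace sketch of the single-bit mailbox protocol the handler
 * above implements, with C11 atomics standing in for set_bit()/xchg() (the
 * names `mailbox`, `post` and `handle` are illustrative, not kernel APIs):
 *
 *      #include <stdatomic.h>
 *      #include <stdio.h>
 *
 *      enum { MSG_RESCHEDULE, MSG_CALL_FUNC };
 *      static _Atomic unsigned long mailbox;
 *
 *      static void post(int msg)      // sender: set the message bit,
 *      {                              // then ring the doorbell (IPI)
 *              atomic_fetch_or(&mailbox, 1UL << msg);
 *      }
 *
 *      static void handle(void)       // receiver: claim every pending
 *      {                              // bit in one atomic swap
 *              unsigned long ops;
 *
 *              while ((ops = atomic_exchange(&mailbox, 0)) != 0) {
 *                      if (ops & (1UL << MSG_RESCHEDULE))
 *                              puts("reschedule");
 *                      if (ops & (1UL << MSG_CALL_FUNC))
 *                              puts("call-func");
 *              }
 *      }
 *
 *      int main(void) { post(MSG_CALL_FUNC); handle(); return 0; }
 *
 * Because the receiver swaps the whole word to zero, a message posted
 * between two checks is never lost: either the swap picks it up, or the
 * sender's doorbell re-enters the handler.
 */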

static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
        int cpuid, hartid;
        struct cpumask hartid_mask;

        cpumask_clear(&hartid_mask);
        mb();
        for_each_cpu(cpuid, to_whom) {
                set_bit(operation, &ipi_data[cpuid].bits);
                hartid = cpuid_to_hartid_map(cpuid);
                cpumask_set_cpu(hartid, &hartid_mask);
        }
        mb();
        sbi_send_ipi(cpumask_bits(&hartid_mask));
}

static const char * const ipi_names[] = {
        [IPI_RESCHEDULE]        = "Rescheduling interrupts",
        [IPI_CALL_FUNC]         = "Function call interrupts",
};

void show_ipi_stats(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < IPI_MAX; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
                seq_printf(p, " %s\n", ipi_names[i]);
        }
}
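
/*
 * In /proc/interrupts the rows produced above look roughly like this on a
 * two-hart system (the counts here are made up for illustration):
 *
 *      IPI0:      1422       980   Rescheduling interrupts
 *      IPI1:       317       205   Function call interrupts
 */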

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void ipi_stop(void *unused)
{
        while (1)
                wait_for_interrupt();
}

void smp_send_stop(void)
{
        on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
        unsigned int cpu;
        cpumask_t others, hmask, *mask;

        preempt_disable();

        /* Mark every hart's icache as needing a flush for this MM. */
        mask = &mm->context.icache_stale_mask;
        cpumask_setall(mask);

        /* Flush this hart's I$ now, and mark it as flushed. */
        cpu = smp_processor_id();
        cpumask_clear_cpu(cpu, mask);
        local_flush_icache_all();

        /*
         * Flush the I$ of other harts concurrently executing, and mark them
         * as flushed.
         */
        cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
        local |= cpumask_empty(&others);
        if (mm != current->active_mm || !local) {
                cpumask_clear(&hmask);
                riscv_cpuid_to_hartid_mask(&others, &hmask);
                sbi_remote_fence_i(hmask.bits);
        } else {
                /*
                 * It's assumed that at least one strongly ordered operation
                 * is performed on this hart between setting a hart's cpumask
                 * bit and scheduling this MM context on that hart.  Sending
                 * an SBI remote message will do this, but in the case where
                 * no messages are sent we still need to order this hart's
                 * writes with flush_icache_deferred().
                 */
                smp_mb();
        }

        preempt_enable();
}
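
/*
 * The consumer side of icache_stale_mask lives in the context-switch path
 * (in the kernel tree, a helper along the lines of flush_icache_deferred()
 * in arch/riscv/mm/context.c).  A minimal sketch, assuming such a helper
 * runs just before execution resumes in the MM on this hart:
 *
 *      static inline void flush_icache_deferred(struct mm_struct *mm)
 *      {
 *              unsigned int cpu = smp_processor_id();
 *              cpumask_t *mask = &mm->context.icache_stale_mask;
 *
 *              if (cpumask_test_cpu(cpu, mask)) {
 *                      cpumask_clear_cpu(cpu, mask);
 *                      // Pairs with the smp_mb() in flush_icache_mm():
 *                      // order the stale-bit read before any instruction
 *                      // fetch from the freshly written pages.
 *                      smp_mb();
 *                      local_flush_icache_all();
 *              }
 *      }
 */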