// SPDX-License-Identifier: GPL-2.0

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
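
/*
 * One pending-IPI word per CPU.  Each bit corresponds to an
 * ipi_message_type value below; senders set bits remotely and the
 * receiving CPU consumes the whole word atomically in handle_ipi().
 * ____cacheline_aligned keeps each CPU's word on its own cache line,
 * so cross-CPU set_bit() traffic does not bounce unrelated data.
 */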
struct ipi_data_struct {
	unsigned long bits ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);

enum ipi_message_type {
	IPI_EMPTY,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_MAX
};
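
/*
 * Per-CPU IPI handler, run in hard-IRQ context on the receiving CPU.
 * xchg() atomically claims every bit posted so far; the outer loop
 * then re-reads until the word stays empty, so an IPI posted while a
 * previous batch is being serviced is not lost.  Any bit at or above
 * IPI_MAX indicates corruption and trips the BUG_ON().
 */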
static irqreturn_t handle_ipi(int irq, void *dev)
{
	while (true) {
		unsigned long ops;

		ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE))
			scheduler_ipi();

		if (ops & (1 << IPI_CALL_FUNC))
			generic_smp_call_function_interrupt();

		BUG_ON((ops >> IPI_MAX) != 0);
	}

	return IRQ_HANDLED;
}

static void (*send_arch_ipi)(const struct cpumask *mask);

static int ipi_irq;
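
/*
 * The platform's interrupt-controller driver registers its IPI
 * trigger here once during early init; later calls are ignored.
 * A minimal sketch of a caller, for illustration only --
 * demo_send_ipi() and demo_ipi_irq are placeholder names, not part
 * of this file:
 *
 *	static void demo_send_ipi(const struct cpumask *mask)
 *	{
 *		// write 'mask' into the controller's IPI trigger register
 *	}
 *
 *	set_send_ipi(&demo_send_ipi, demo_ipi_irq);
 */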
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
	if (send_arch_ipi)
		return;

	send_arch_ipi = func;
	ipi_irq = irq;
}
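
/*
 * Post 'operation' to every CPU in 'to_whom', then raise the hardware
 * IPI.  The smp_mb() orders the set_bit() stores before the trigger,
 * so a CPU woken by the interrupt is guaranteed to see its pending
 * bit in handle_ipi().
 */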
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	for_each_cpu(i, to_whom)
		set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

	smp_mb();
	send_arch_ipi(to_whom);
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void ipi_stop(void *unused)
{
	while (1);
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}
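
/*
 * Arch hook the scheduler core uses to kick a remote CPU; the bit is
 * serviced as scheduler_ipi() in handle_ipi().
 */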
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}
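
/*
 * Boot-time IPI plumbing: request_percpu_irq() registers handle_ipi()
 * once for the per-CPU IRQ, but each CPU must still enable its own
 * instance of the line.  The boot CPU does that here through
 * enable_smp_ipi(); every secondary repeats the call from
 * csky_start_secondary().
 */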
static void __init enable_smp_ipi(void)
{
	enable_percpu_irq(ipi_irq, 0);
}

static int ipi_dummy_dev;
void __init setup_smp_ipi(void)
{
	int rc;

	if (ipi_irq == 0)
		panic("%s IRQ mapping failed\n", __func__);

	rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
				&ipi_dummy_dev);
	if (rc)
		panic("%s IRQ request failed\n", __func__);

	enable_smp_ipi();
}
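
/*
 * Walk the device tree for nodes with device_type = "cpu", skip
 * unavailable ones, and take the CPU id from the "reg" property.
 * For illustration, a node of roughly this shape is expected
 * (values are placeholders):
 *
 *	cpu@1 {
 *		device_type = "cpu";
 *		reg = <1>;
 *		status = "okay";
 *	};
 */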
void __init setup_smp(void)
{
	struct device_node *node = NULL;
	unsigned int cpu;

	while ((node = of_find_node_by_type(node, "cpu"))) {
		if (!of_device_is_available(node))
			continue;

		if (of_property_read_u32(node, "reg", &cpu))
			continue;

		if (cpu >= NR_CPUS)
			continue;

		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}
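
/*
 * Boot "mailbox" for a starting secondary: __cpu_up() fills these in,
 * and the secondary's early entry path (_start_smp_secondary and then
 * csky_start_secondary()) reads them before its MMU and cache are
 * configured -- hence volatile, plus the explicit cache flush in
 * __cpu_up().
 */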
extern void _start_smp_secondary(void);

volatile unsigned int secondary_hint;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	unsigned int tmp;

	secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE;
	secondary_hint = mfcr("cr31");
	secondary_ccr = mfcr("cr18");

	/*
	 * The secondary CPU is still held in reset and starts with its
	 * cache disabled, so flush these values from the data cache out
	 * to memory; csky_start_secondary() reads them before its own
	 * cache is set up.
	 */
	mtcr("cr17", 0x22);

	/* Release the CPU from reset via the SMP reset control register. */
	tmp = mfcr("cr<29, 0>");
	tmp |= 1 << cpu;
	mtcr("cr<29, 0>", tmp);

	/* Wait for the CPU to mark itself online. */
	while (!cpu_online(cpu));

	secondary_stack = 0;

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
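
/*
 * C entry point for a secondary CPU, reached from the assembly stub
 * _start_smp_secondary.  It restores the tuning registers captured by
 * __cpu_up(), points the vector base at the shared handlers, resets
 * its MMU/TLB state, adopts init_mm, marks itself online, and finally
 * drops into the idle loop.
 */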
void csky_start_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	mtcr("cr31", secondary_hint);
	mtcr("cr18", secondary_ccr);

	mtcr("vbr", vec_base);

	flush_tlb_all();
	write_mmu_pagemask(0);
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
	asid_cache(smp_processor_id()) = ASID_FIRST_VERSION;

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

	enable_smp_ipi();

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}