/*
 * machine_kexec.c for kexec
 * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/cacheflush.h>
#include <asm/page.h>

extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;

extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;

static unsigned long reboot_code_buffer;

#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);

atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif

int (*_machine_kexec_prepare)(struct kimage *) = NULL;
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
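
/*
 * Dump the layout of a loaded kimage (type, entry point, head of the
 * indirection list and each segment's physical range) at pr_debug
 * level, so a failing load can be inspected with debugging enabled.
 */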
static void kexec_image_info(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("kexec kimage info:\n");
	pr_debug("  type:        %d\n", kimage->type);
	pr_debug("  start:       %lx\n", kimage->start);
	pr_debug("  head:        %lx\n", kimage->head);
	pr_debug("  nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			 i,
			 kimage->segment[i].mem,
			 kimage->segment[i].mem + kimage->segment[i].memsz,
			 (unsigned long)kimage->segment[i].memsz,
			 (unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
	}
}
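
/*
 * Validate an image before the generic kexec code loads it.  On SMP
 * the load is refused unless the platform provides a non-boot CPU
 * hook; platforms may add further checks via _machine_kexec_prepare.
 */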
int
machine_kexec_prepare(struct kimage *kimage)
{
#ifdef CONFIG_SMP
	if (!kexec_nonboot_cpu_func())
		return -EINVAL;
#endif

	kexec_image_info(kimage);

	if (_machine_kexec_prepare)
		return _machine_kexec_prepare(kimage);

	return 0;
}

void
machine_kexec_cleanup(struct kimage *kimage)
{
}

#ifdef CONFIG_SMP
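/*
 * Runs on each secondary CPU during a normal (non-crash) kexec: mark
 * the CPU offline, disable interrupts, spin until the boot CPU sets
 * kexec_ready_to_reboot, then jump into the relocation code.
 */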
static void kexec_shutdown_secondary(void *param)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	kexec_reboot();

	/* NOTREACHED */
}
#endif
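
/*
 * Quiesce the machine before kexec: run the optional platform
 * shutdown hook, then ask every other CPU to park itself and wait
 * until this CPU is the only one left online.
 */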
void
machine_shutdown(void)
{
	if (_machine_kexec_shutdown)
		_machine_kexec_shutdown();

#ifdef CONFIG_SMP
	smp_call_function(kexec_shutdown_secondary, NULL, 0);

	while (num_online_cpus() > 1) {
		cpu_relax();
		mdelay(1);
	}
#endif
}
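
/*
 * Crash (kdump) shutdown: use the platform override if one is
 * registered, otherwise fall back to the generic
 * default_machine_crash_shutdown().
 */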
void
machine_crash_shutdown(struct pt_regs *regs)
{
	if (_machine_crash_shutdown)
		_machine_crash_shutdown(regs);
	else
		default_machine_crash_shutdown(regs);
}

#ifdef CONFIG_SMP
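/*
 * Park a secondary CPU in the relocated code: flush its icache over
 * the copied relocation code, then enter the relocated kexec_smp_wait
 * code, never to return.
 */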
void kexec_nonboot_cpu_jump(void)
{
	local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
				 reboot_code_buffer + relocate_new_kernel_size);

	relocated_kexec_smp_wait(NULL);
}
#endif
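
/*
 * Jump into the relocated code.  Secondary CPUs divert into the
 * non-boot-CPU path; the boot CPU flushes its icache over the reboot
 * code buffer and calls it directly.  Does not return.
 */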
void kexec_reboot(void)
{
	void (*do_kexec)(void) __noreturn;

#ifdef CONFIG_SMP
	if (smp_processor_id() > 0) {
		/*
		 * Instead of cpu_relax() or wait, this is needed for kexec
		 * smp reboot.  Kdump usually doesn't require an smp new
		 * kernel, but kexec may do.
		 */
		kexec_nonboot_cpu();

		/* NOTREACHED */
	}
#endif

	/*
	 * Make sure we get correct instructions written by the
	 * machine_kexec() CPU.
	 */
	local_flush_icache_range(reboot_code_buffer,
				 reboot_code_buffer + relocate_new_kernel_size);

	do_kexec = (void *)reboot_code_buffer;
	do_kexec();
}
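
/*
 * Perform the kexec proper: copy relocate_new_kernel into the control
 * page, convert the generic page list from physical to kernel virtual
 * addresses, take this CPU offline with interrupts disabled, flush
 * caches, release any waiting secondaries and jump to the new kernel.
 */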
void
machine_kexec(struct kimage *image)
{
	unsigned long entry;
	unsigned long *ptr;

	reboot_code_buffer =
		(unsigned long)page_address(image->control_code_page);

	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);

	if (image->type == KEXEC_TYPE_DEFAULT) {
		kexec_indirection_page =
			(unsigned long) phys_to_virt(image->head & PAGE_MASK);
	} else {
		kexec_indirection_page = (unsigned long)&image->head;
	}

	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses.  They are directly accessible through KSEG0 (or
	 * CKSEG0 or XKPHYS on a 64-bit system), hence the
	 * phys_to_virt() call.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/* Mark offline BEFORE disabling local irq. */
	set_cpu_online(smp_processor_id(), false);

	/* We do not want to be bothered by interrupts from here on. */
	local_irq_disable();

	printk("Will call new kernel at %08lx\n", image->start);
	printk("Bye ...\n");

	/* Make reboot code buffer available to the boot CPU. */
	__flush_cache_all();

#ifdef CONFIG_SMP
	/* All secondary cpus may now jump to the kexec_wait cycle. */
	relocated_kexec_smp_wait = reboot_code_buffer +
		(void *)(kexec_smp_wait - relocate_new_kernel);

	smp_wmb();
	atomic_set(&kexec_ready_to_reboot, 1);
#endif

	kexec_reboot();
}