/* arch/s390/kernel/machine_kexec.c */
  1. /*
  2. * Copyright IBM Corp. 2005, 2011
  3. *
  4. * Author(s): Rolf Adelsberger,
  5. * Heiko Carstens <heiko.carstens@de.ibm.com>
  6. * Michael Holzheu <holzheu@linux.vnet.ibm.com>
  7. */
  8. #include <linux/device.h>
  9. #include <linux/mm.h>
  10. #include <linux/kexec.h>
  11. #include <linux/delay.h>
  12. #include <linux/reboot.h>
  13. #include <linux/ftrace.h>
  14. #include <linux/debug_locks.h>
  15. #include <linux/suspend.h>
  16. #include <asm/cio.h>
  17. #include <asm/setup.h>
  18. #include <asm/pgtable.h>
  19. #include <asm/pgalloc.h>
  20. #include <asm/smp.h>
  21. #include <asm/reset.h>
  22. #include <asm/ipl.h>
  23. #include <asm/diag.h>
  24. #include <asm/elf.h>
  25. #include <asm/asm-offsets.h>
  26. #include <asm/os_info.h>
  27. #include <asm/switch_to.h>
/* Signature of the relocation routine: takes the kimage entry list and the
 * entry point of the new kernel. */
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

/* Relocation assembler stub and its length; defined outside this file
 * (presumably in relocate_kernel.S — not visible here). */
extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;
  31. #ifdef CONFIG_CRASH_DUMP
  32. /*
  33. * Create ELF notes for one CPU
  34. */
  35. static void add_elf_notes(int cpu)
  36. {
  37. struct save_area *sa = (void *) 4608 + store_prefix();
  38. void *ptr;
  39. memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
  40. ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
  41. ptr = fill_cpu_elf_notes(ptr, sa, NULL);
  42. memset(ptr, 0, sizeof(struct elf_note));
  43. }
/*
 * Initialize CPU ELF notes
 *
 * Collect register state of every online CPU into the per-CPU crash note
 * buffers, and preserve the dump CPU's save area at absolute zero.
 */
static void setup_regs(void)
{
	/* This CPU's save area, addressed via its saved prefix register. */
	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
	struct _lowcore *lc;
	int cpu, this_cpu;

	/* Get lowcore pointer from store status of this CPU (absolute zero) */
	lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area;
	this_cpu = smp_find_processor_id(stap());
	/* Our own status was already stored by the caller (see comment on
	 * __do_machine_kdump()); just add the notes. */
	add_elf_notes(this_cpu);
	/* Store status of each other online CPU, then add its notes. */
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		if (smp_store_status(cpu))
			continue;	/* store status failed; skip this CPU */
		add_elf_notes(cpu);
	}
	/* Save vector registers if the machine has the vector facility. */
	if (MACHINE_HAS_VX)
		save_vx_regs_safe((void *) lc->vector_save_area_addr);
	/* Copy dump CPU store status info to absolute zero */
	memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
  68. /*
  69. * PM notifier callback for kdump
  70. */
  71. static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
  72. void *ptr)
  73. {
  74. switch (action) {
  75. case PM_SUSPEND_PREPARE:
  76. case PM_HIBERNATION_PREPARE:
  77. if (crashk_res.start)
  78. crash_map_reserved_pages();
  79. break;
  80. case PM_POST_SUSPEND:
  81. case PM_POST_HIBERNATION:
  82. if (crashk_res.start)
  83. crash_unmap_reserved_pages();
  84. break;
  85. default:
  86. return NOTIFY_DONE;
  87. }
  88. return NOTIFY_OK;
  89. }
/* Register the PM notifier at boot so the crashkernel region is handled
 * around suspend/hibernation (see machine_kdump_pm_cb() above). */
static int __init machine_kdump_pm_init(void)
{
	pm_notifier(machine_kdump_pm_cb, 0);
	return 0;
}
arch_initcall(machine_kdump_pm_init);
  96. #endif
/*
 * Start kdump: We expect here that a store status has been done on our CPU
 */
static void __do_machine_kdump(void *image)
{
#ifdef CONFIG_CRASH_DUMP
	/* Entry point of purgatory, stored in the kimage by the loader. */
	int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;

	/* Collect register state of all CPUs into the crash notes. */
	setup_regs();
	/* Switch to a 64-bit addressing-mode PSW before entering purgatory. */
	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
	/* Argument 1 = really start the dump kernel (argument 0 only
	 * verifies checksums — see kdump_csum_valid()). */
	start_kdump(1);
#endif
}
/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 *
 * Returns 0 when purgatory reports valid checksums, -EINVAL otherwise
 * (and always -EINVAL when CONFIG_CRASH_DUMP is off).
 */
static int kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
	int (*start_kdump)(int) = (void *)image->start;
	int rc;

	/* Purgatory runs with real addresses, so bracket the call with
	 * DAT off/on. */
	__arch_local_irq_stnsm(0xfb); /* disable DAT */
	rc = start_kdump(0);
	__arch_local_irq_stosm(0x04); /* enable DAT */
	/* Non-zero rc from purgatory means the checksums are valid. */
	return rc ? 0 : -EINVAL;
#else
	return -EINVAL;
#endif
}
  125. /*
  126. * Map or unmap crashkernel memory
  127. */
  128. static void crash_map_pages(int enable)
  129. {
  130. unsigned long size = resource_size(&crashk_res);
  131. BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
  132. size % KEXEC_CRASH_MEM_ALIGN);
  133. if (enable)
  134. vmem_add_mapping(crashk_res.start, size);
  135. else {
  136. vmem_remove_mapping(crashk_res.start, size);
  137. if (size)
  138. os_info_crashkernel_add(crashk_res.start, size);
  139. else
  140. os_info_crashkernel_add(0, 0);
  141. }
  142. }
/*
 * Map crashkernel memory
 *
 * Called e.g. from the PM notifier before suspend/hibernation
 * (see machine_kdump_pm_cb()).
 */
void crash_map_reserved_pages(void)
{
	crash_map_pages(1);
}
/*
 * Unmap crashkernel memory
 *
 * Counterpart to crash_map_reserved_pages(); also updates os_info
 * (see crash_map_pages()).
 */
void crash_unmap_reserved_pages(void)
{
	crash_map_pages(0);
}
/*
 * Give back memory to hypervisor before new kdump is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
	/* Under z/VM, release the crashkernel page frames to the hypervisor
	 * via diagnose 0x10.  Second argument is the frame count
	 * (PFN_DOWN of the region size), not an end frame number. */
	if (MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crashk_res.start),
			     PFN_DOWN(crashk_res.end - crashk_res.start + 1));
	return 0;
#else
	return -EINVAL;
#endif
}
  171. int machine_kexec_prepare(struct kimage *image)
  172. {
  173. void *reboot_code_buffer;
  174. /* Can't replace kernel image since it is read-only. */
  175. if (ipl_flags & IPL_NSS_VALID)
  176. return -EOPNOTSUPP;
  177. if (image->type == KEXEC_TYPE_CRASH)
  178. return machine_kexec_prepare_kdump();
  179. /* We don't support anything but the default image type for now. */
  180. if (image->type != KEXEC_TYPE_DEFAULT)
  181. return -EINVAL;
  182. /* Get the destination where the assembler code should be copied to.*/
  183. reboot_code_buffer = (void *) page_to_phys(image->control_code_page);
  184. /* Then copy it */
  185. memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
  186. return 0;
  187. }
/* Intentionally empty: no arch-specific state to release for an image. */
void machine_kexec_cleanup(struct kimage *image)
{
}
/*
 * Export s390-specific symbols into the vmcoreinfo note so that dump
 * analysis tools can locate them (lowcore_ptr is an NR_CPUS-sized array).
 */
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(lowcore_ptr);
	VMCOREINFO_SYMBOL(high_memory);
	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
}
/* Intentionally empty: no arch-specific shutdown work needed for kexec. */
void machine_shutdown(void)
{
}
/* Intentionally empty: crash state is captured later in setup_regs(). */
void machine_crash_shutdown(struct pt_regs *regs)
{
}
  203. /*
  204. * Do normal kexec
  205. */
  206. static void __do_machine_kexec(void *data)
  207. {
  208. relocate_kernel_t data_mover;
  209. struct kimage *image = data;
  210. data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
  211. /* Call the moving routine */
  212. (*data_mover)(&image->head, image->start);
  213. }
/*
 * Reset system and call either kdump or normal kexec
 *
 * Runs on the IPL CPU with the other CPUs already stopped
 * (see machine_kexec()).  Does not return.
 */
static void __machine_kexec(void *data)
{
	struct kimage *image = data;

	__arch_local_irq_stosm(0x04); /* enable DAT */
	pfault_fini();		/* stop pfault handling before the reset */
	tracing_off();
	debug_locks_off();
	if (image->type == KEXEC_TYPE_CRASH) {
		/* Log linkage-stack info, then reset and enter kdump. */
		lgr_info_log();
		s390_reset_system(__do_machine_kdump, data);
	} else {
		s390_reset_system(__do_machine_kexec, data);
	}
	/* Only reached if the reset/entry path returns unexpectedly. */
	disabled_wait((unsigned long) __builtin_return_address(0));
}
/*
 * Do either kdump or normal kexec. In case of kdump we first ask
 * purgatory, if kdump checksums are valid.
 */
void machine_kexec(struct kimage *image)
{
	/* Abort silently if the loaded kdump image fails its checksum. */
	if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
		return;
	tracer_disable();
	/* Stop all other CPUs, then continue on the IPL CPU. */
	smp_send_stop();
	smp_call_ipl_cpu(__machine_kexec, image);
}