crash.c

/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)     "kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN   4096

/* This primarily represents the number of split ranges due to exclusion */
#define CRASH_MAX_RANGES        16

struct crash_mem_range {
        u64 start, end;
};

struct crash_mem {
        unsigned int nr_ranges;
        struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
        struct kimage *image;
        /*
         * Total number of ram ranges we have after various adjustments for
         * the GART aperture, crash reserved region, etc.
         */
        unsigned int max_nr_ranges;
        unsigned long gart_start, gart_end;

        /* Pointer to elf header */
        void *ehdr;
        /* Pointer to next phdr */
        void *bufp;
        struct crash_mem mem;
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
        struct boot_params *params;
        /* Type of memory */
        unsigned int type;
};

int in_crash_kexec;

/*
 * This is used to VMCLEAR all VMCSs loaded on the processor.
 * The callback function pointer is assigned when the kvm_intel
 * module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
        crash_vmclear_fn *do_vmclear_operation = NULL;

        rcu_read_lock();
        do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
        if (do_vmclear_operation)
                do_vmclear_operation();
        rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
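
/*
 * kdump_nmi_callback() runs on each remote CPU, in NMI context, once
 * nmi_shootdown_cpus() below has sent the shootdown NMI: it saves that
 * CPU's register state, clears any loaded VMCSs, disables virtualization
 * and shuts down the local APIC before the CPU is parked.
 */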
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
        struct pt_regs fixed_regs;

        if (!user_mode(regs)) {
                crash_fixup_ss_esp(&fixed_regs, regs);
                regs = &fixed_regs;
        }
#endif
        crash_save_cpu(regs, cpu);

        /*
         * VMCLEAR VMCSs loaded on all cpus if needed.
         */
        cpu_crash_vmclear_loaded_vmcss();

        /*
         * Disable VMX or SVM if needed.
         *
         * We need to disable virtualization on all CPUs.
         * Having VMX or SVM enabled on any CPU may break rebooting
         * after the kdump kernel has finished its task.
         */
        cpu_emergency_vmxoff();
        cpu_emergency_svm_disable();

        disable_local_APIC();
}

static void kdump_nmi_shootdown_cpus(void)
{
        in_crash_kexec = 1;
        nmi_shootdown_cpus(kdump_nmi_callback);

        disable_local_APIC();
}

#else
static void kdump_nmi_shootdown_cpus(void)
{
        /* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
        /*
         * This function is only called after the system
         * has panicked or is otherwise in a critical state.
         * The minimum amount of code to allow a kexec'd kernel
         * to run successfully needs to happen here.
         *
         * In practice this means shooting down the other cpus in
         * an SMP system.
         */
        /* The kernel is broken so disable interrupts */
        local_irq_disable();

        kdump_nmi_shootdown_cpus();

        /*
         * VMCLEAR VMCSs loaded on this cpu if needed.
         */
        cpu_crash_vmclear_loaded_vmcss();

        /*
         * Booting the kdump kernel with VMX or SVM enabled won't work,
         * because (among other limitations) we can't disable paging
         * with the virt flags.
         */
        cpu_emergency_vmxoff();
        cpu_emergency_svm_disable();

#ifdef CONFIG_X86_IO_APIC
        /* Prevent crash_kexec() from deadlocking on ioapic_lock. */
        ioapic_zap_locks();
        disable_IO_APIC();
#endif
        lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
        hpet_disable();
#endif
        crash_save_cpu(regs, safe_smp_processor_id());
}
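
/*
 * Everything below implements the kexec_file_load() based kdump path:
 * building the memory map handed to the second kernel and the ELF core
 * headers it exposes through /proc/vmcore.
 */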
#ifdef CONFIG_KEXEC_FILE

static int get_nr_ram_ranges_callback(unsigned long start_pfn,
                                unsigned long nr_pfn, void *arg)
{
        int *nr_ranges = arg;

        (*nr_ranges)++;
        return 0;
}

static int get_gart_ranges_callback(u64 start, u64 end, void *arg)
{
        struct crash_elf_data *ced = arg;

        ced->gart_start = start;
        ced->gart_end = end;

        /* Not expecting more than 1 gart aperture */
        return 1;
}

/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
                                   struct kimage *image)
{
        unsigned int nr_ranges = 0;

        ced->image = image;

        walk_system_ram_range(0, -1, &nr_ranges,
                              get_nr_ram_ranges_callback);

        ced->max_nr_ranges = nr_ranges;

        /*
         * We don't create ELF headers for the GART aperture, as an attempt
         * to dump this memory in the second kernel leads to a hang/crash.
         * If a GART aperture is present, that region needs to be excluded,
         * which could require an extra phdr.
         */
        walk_iomem_res("GART", IORESOURCE_MEM, 0, -1,
                       ced, get_gart_ranges_callback);

        /*
         * If we have a GART region, excluding it could split a memory
         * range, resulting in an extra header. Account for that.
         */
        if (ced->gart_end)
                ced->max_nr_ranges++;

        /* Exclusion of crash region could split memory ranges */
        ced->max_nr_ranges++;

        /* If crashk_low_res is not 0, another range split is possible */
        if (crashk_low_res.end)
                ced->max_nr_ranges++;
}
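
/*
 * Example of how exclusion reshapes mem->ranges[]: excluding
 * [0x3000, 0x5fff] from the single range [0x1000, 0x7fff] truncates it
 * to [0x1000, 0x2fff] and inserts a new range [0x6000, 0x7fff] after it,
 * growing nr_ranges by one. A range that is covered completely is instead
 * deleted and the tail of the array is shifted left over it.
 */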
static int exclude_mem_range(struct crash_mem *mem,
                unsigned long long mstart, unsigned long long mend)
{
        int i, j;
        unsigned long long start, end;
        struct crash_mem_range temp_range = {0, 0};

        for (i = 0; i < mem->nr_ranges; i++) {
                start = mem->ranges[i].start;
                end = mem->ranges[i].end;

                if (mstart > end || mend < start)
                        continue;

                /* Truncate any area outside of range */
                if (mstart < start)
                        mstart = start;
                if (mend > end)
                        mend = end;

                /* Found completely overlapping range */
                if (mstart == start && mend == end) {
                        mem->ranges[i].start = 0;
                        mem->ranges[i].end = 0;
                        if (i < mem->nr_ranges - 1) {
                                /* Shift rest of the ranges to left */
                                for (j = i; j < mem->nr_ranges - 1; j++) {
                                        mem->ranges[j].start =
                                                mem->ranges[j+1].start;
                                        mem->ranges[j].end =
                                                mem->ranges[j+1].end;
                                }
                        }
                        mem->nr_ranges--;
                        return 0;
                }

                if (mstart > start && mend < end) {
                        /* Split original range */
                        mem->ranges[i].end = mstart - 1;
                        temp_range.start = mend + 1;
                        temp_range.end = end;
                } else if (mstart != start)
                        mem->ranges[i].end = mstart - 1;
                else
                        mem->ranges[i].start = mend + 1;
                break;
        }

        /* If a split happened, add the split portion to the array */
        if (!temp_range.end)
                return 0;

        /* Split happened */
        if (i == CRASH_MAX_RANGES - 1) {
                pr_err("Too many crash ranges after split\n");
                return -ENOMEM;
        }

        /* Location where new range should go */
        j = i + 1;
        if (j < mem->nr_ranges) {
                /* Move over all ranges one slot towards the end */
                for (i = mem->nr_ranges - 1; i >= j; i--)
                        mem->ranges[i + 1] = mem->ranges[i];
        }

        mem->ranges[j].start = temp_range.start;
        mem->ranges[j].end = temp_range.end;
        mem->nr_ranges++;
        return 0;
}

/*
 * Look for any unwanted ranges between mstart and mend and remove them.
 * This might lead to splits; split ranges end up in ced->mem.ranges[].
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
                unsigned long long mstart, unsigned long long mend)
{
        struct crash_mem *cmem = &ced->mem;
        int ret = 0;

        memset(cmem->ranges, 0, sizeof(cmem->ranges));

        cmem->ranges[0].start = mstart;
        cmem->ranges[0].end = mend;
        cmem->nr_ranges = 1;

        /* Exclude crashkernel region */
        ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
        if (ret)
                return ret;

        if (crashk_low_res.end) {
                ret = exclude_mem_range(cmem, crashk_low_res.start,
                                        crashk_low_res.end);
                if (ret)
                        return ret;
        }

        /* Exclude GART region */
        if (ced->gart_end) {
                ret = exclude_mem_range(cmem, ced->gart_start, ced->gart_end);
                if (ret)
                        return ret;
        }

        return ret;
}
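
/*
 * Called for each System RAM resource: strip out the crashkernel, low
 * crashkernel and GART regions, then emit one PT_LOAD phdr per surviving
 * sub-range. The phdr covering the backup region is pointed at the backup
 * segment so the dump reads the preserved copy of the first 640K.
 */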
static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
        struct crash_elf_data *ced = arg;
        Elf64_Ehdr *ehdr;
        Elf64_Phdr *phdr;
        unsigned long mstart, mend;
        struct kimage *image = ced->image;
        struct crash_mem *cmem;
        int ret, i;

        ehdr = ced->ehdr;

        /* Exclude unwanted mem ranges */
        ret = elf_header_exclude_ranges(ced, start, end);
        if (ret)
                return ret;

        /* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
        cmem = &ced->mem;

        for (i = 0; i < cmem->nr_ranges; i++) {
                mstart = cmem->ranges[i].start;
                mend = cmem->ranges[i].end;

                phdr = ced->bufp;
                ced->bufp += sizeof(Elf64_Phdr);

                phdr->p_type = PT_LOAD;
                phdr->p_flags = PF_R|PF_W|PF_X;
                phdr->p_offset = mstart;

                /*
                 * If a range matches the backup region, adjust the offset
                 * to the backup segment.
                 */
                if (mstart == image->arch.backup_src_start &&
                    (mend - mstart + 1) == image->arch.backup_src_sz)
                        phdr->p_offset = image->arch.backup_load_addr;

                phdr->p_paddr = mstart;
                phdr->p_vaddr = (unsigned long long) __va(mstart);
                phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
                phdr->p_align = 0;
                ehdr->e_phnum++;
                pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
                         phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
                         ehdr->e_phnum, phdr->p_offset);
        }

        return ret;
}
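
/*
 * Layout produced below: one Elf64_Ehdr, followed by a PT_NOTE phdr per
 * present CPU (crash notes), one PT_NOTE phdr for vmcoreinfo, one PT_LOAD
 * phdr for the kernel text mapping (x86_64 only), and PT_LOAD phdrs for
 * the System RAM chunks.
 */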
static int prepare_elf64_headers(struct crash_elf_data *ced,
                void **addr, unsigned long *sz)
{
        Elf64_Ehdr *ehdr;
        Elf64_Phdr *phdr;
        unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
        unsigned char *buf, *bufp;
        unsigned int cpu;
        unsigned long long notes_addr;
        int ret;

        /* extra phdr for vmcoreinfo elf note */
        nr_phdr = nr_cpus + 1;
        nr_phdr += ced->max_nr_ranges;

        /*
         * kexec-tools creates an extra PT_LOAD phdr for the kernel text
         * mapping area on x86_64 (ffffffff80000000 - ffffffffa0000000),
         * which tools like gdb appear to rely on. The same physical memory
         * is therefore mapped by two phdrs: one with kernel text virtual
         * addresses and one with __va(physical) addresses.
         */
        nr_phdr++;
        elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
        elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

        buf = vzalloc(elf_sz);
        if (!buf)
                return -ENOMEM;

        bufp = buf;
        ehdr = (Elf64_Ehdr *)bufp;
        bufp += sizeof(Elf64_Ehdr);
        memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
        ehdr->e_ident[EI_CLASS] = ELFCLASS64;
        ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
        ehdr->e_ident[EI_VERSION] = EV_CURRENT;
        ehdr->e_ident[EI_OSABI] = ELF_OSABI;
        memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
        ehdr->e_type = ET_CORE;
        ehdr->e_machine = ELF_ARCH;
        ehdr->e_version = EV_CURRENT;
        ehdr->e_phoff = sizeof(Elf64_Ehdr);
        ehdr->e_ehsize = sizeof(Elf64_Ehdr);
        ehdr->e_phentsize = sizeof(Elf64_Phdr);

        /* Prepare one phdr of type PT_NOTE for each present cpu */
        for_each_present_cpu(cpu) {
                phdr = (Elf64_Phdr *)bufp;
                bufp += sizeof(Elf64_Phdr);
                phdr->p_type = PT_NOTE;
                notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
                phdr->p_offset = phdr->p_paddr = notes_addr;
                phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
                (ehdr->e_phnum)++;
        }

        /* Prepare one PT_NOTE header for vmcoreinfo */
        phdr = (Elf64_Phdr *)bufp;
        bufp += sizeof(Elf64_Phdr);
        phdr->p_type = PT_NOTE;
        phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
        phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
        (ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
        /* Prepare PT_LOAD type program header for kernel text region */
        phdr = (Elf64_Phdr *)bufp;
        bufp += sizeof(Elf64_Phdr);
        phdr->p_type = PT_LOAD;
        phdr->p_flags = PF_R|PF_W|PF_X;
        phdr->p_vaddr = (Elf64_Addr)_text;
        phdr->p_filesz = phdr->p_memsz = _end - _text;
        phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
        (ehdr->e_phnum)++;
#endif

        /* Prepare PT_LOAD headers for system ram chunks. */
        ced->ehdr = ehdr;
        ced->bufp = bufp;
        ret = walk_system_ram_res(0, -1, ced,
                                  prepare_elf64_ram_headers_callback);
        if (ret < 0)
                return ret;

        *addr = buf;
        *sz = elf_sz;
        return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
                               unsigned long *sz)
{
        struct crash_elf_data *ced;
        int ret;

        ced = kzalloc(sizeof(*ced), GFP_KERNEL);
        if (!ced)
                return -ENOMEM;

        fill_up_crash_elf_data(ced, image);

        /* By default prepare 64-bit headers */
        ret = prepare_elf64_headers(ced, addr, sz);
        kfree(ced);
        return ret;
}
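
/*
 * add_e820_entry() appends one entry to the boot_params e820 map handed
 * to the second kernel; memmap_entry_callback() adapts it to the
 * walk_iomem_res() callback signature, converting each resource into an
 * e820 entry of the caller-chosen type.
 */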
static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
{
        unsigned int nr_e820_entries;

        nr_e820_entries = params->e820_entries;
        if (nr_e820_entries >= E820MAX)
                return 1;

        memcpy(&params->e820_map[nr_e820_entries], entry,
               sizeof(struct e820entry));
        params->e820_entries++;
        return 0;
}

static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
        struct crash_memmap_data *cmd = arg;
        struct boot_params *params = cmd->params;
        struct e820entry ei;

        ei.addr = start;
        ei.size = end - start + 1;
        ei.type = cmd->type;
        add_e820_entry(params, &ei);

        return 0;
}
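
/*
 * Carve the backup-destination and ELF-header segments out of
 * [mstart, mend] so the second kernel does not treat the memory holding
 * them as ordinary usable RAM.
 */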
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
                                 unsigned long long mstart,
                                 unsigned long long mend)
{
        unsigned long start, end;
        int ret = 0;

        cmem->ranges[0].start = mstart;
        cmem->ranges[0].end = mend;
        cmem->nr_ranges = 1;

        /* Exclude Backup region */
        start = image->arch.backup_load_addr;
        end = start + image->arch.backup_src_sz - 1;
        ret = exclude_mem_range(cmem, start, end);
        if (ret)
                return ret;

        /* Exclude elf header region */
        start = image->arch.elf_load_addr;
        end = start + image->arch.elf_headers_sz - 1;
        return exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
        int i, ret = 0;
        unsigned long flags;
        struct e820entry ei;
        struct crash_memmap_data cmd;
        struct crash_mem *cmem;

        cmem = vzalloc(sizeof(struct crash_mem));
        if (!cmem)
                return -ENOMEM;

        memset(&cmd, 0, sizeof(struct crash_memmap_data));
        cmd.params = params;

        /* Add the first 640K segment */
        ei.addr = image->arch.backup_src_start;
        ei.size = image->arch.backup_src_sz;
        ei.type = E820_RAM;
        add_e820_entry(params, &ei);

        /* Add ACPI tables */
        cmd.type = E820_ACPI;
        flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        walk_iomem_res("ACPI Tables", flags, 0, -1, &cmd,
                       memmap_entry_callback);

        /* Add ACPI Non-volatile Storage */
        cmd.type = E820_NVS;
        walk_iomem_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
                       memmap_entry_callback);

        /* Add crashk_low_res region */
        if (crashk_low_res.end) {
                ei.addr = crashk_low_res.start;
                ei.size = crashk_low_res.end - crashk_low_res.start + 1;
                ei.type = E820_RAM;
                add_e820_entry(params, &ei);
        }

        /* Exclude some ranges from crashk_res and add the rest to memmap */
        ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
                                    crashk_res.end);
        if (ret)
                goto out;

        for (i = 0; i < cmem->nr_ranges; i++) {
                ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

                /* If entry is less than a page, skip it */
                if (ei.size < PAGE_SIZE)
                        continue;
                ei.addr = cmem->ranges[i].start;
                ei.type = E820_RAM;
                add_e820_entry(params, &ei);
        }

out:
        vfree(cmem);
        return ret;
}

static int determine_backup_region(u64 start, u64 end, void *arg)
{
        struct kimage *image = arg;

        image->arch.backup_src_start = start;
        image->arch.backup_src_sz = end - start + 1;

        /* Expecting only one range for backup region */
        return 1;
}
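
/*
 * Load the extra segments a kdump image needs: a zero-filled placeholder
 * for the backup of the first 640K (purgatory copies the real contents at
 * crash time) and the ELF core headers prepared above. Judging from the
 * call sites below, the positional kexec_add_buffer() arguments are, in
 * order: buffer, bufsz, memsz, buf_align, buf_min, buf_max, top_down and
 * the returned load address.
 */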
int crash_load_segments(struct kimage *image)
{
        unsigned long src_start, src_sz, elf_sz;
        void *elf_addr;
        int ret;

        /*
         * Determine and load a segment for the backup area. The first
         * 640K RAM region is the backup source.
         */
        ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
                                  image, determine_backup_region);

        /* Zero or positive return values are ok */
        if (ret < 0)
                return ret;

        src_start = image->arch.backup_src_start;
        src_sz = image->arch.backup_src_sz;

        /* Add backup segment. */
        if (src_sz) {
                /*
                 * Ideally there is no source for the backup segment; it is
                 * copied in purgatory after the crash. Just add a zero-filled
                 * segment for now to make sure the checksum logic works fine.
                 */
                ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
                                       sizeof(crash_zero_bytes), src_sz,
                                       PAGE_SIZE, 0, -1, 0,
                                       &image->arch.backup_load_addr);
                if (ret)
                        return ret;
                pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
                         image->arch.backup_load_addr, src_start, src_sz);
        }

        /* Prepare elf headers and add a segment */
        ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
        if (ret)
                return ret;

        image->arch.elf_headers = elf_addr;
        image->arch.elf_headers_sz = elf_sz;

        ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
                               ELF_CORE_HEADER_ALIGN, 0, -1, 0,
                               &image->arch.elf_load_addr);
        if (ret) {
                vfree((void *)image->arch.elf_headers);
                return ret;
        }

        pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
                 image->arch.elf_load_addr, elf_sz, elf_sz);

        return ret;
}

#endif /* CONFIG_KEXEC_FILE */