crash_dump.c
/*
 * S390 kdump implementation
 *
 * Copyright IBM Corp. 2011
 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/crash_dump.h>
#include <asm/lowcore.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
#include <asm/sclp.h>

#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))

static struct memblock_region oldmem_region;

static struct memblock_type oldmem_type = {
        .cnt = 1,
        .max = 1,
        .total_size = 0,
        .regions = &oldmem_region,
};
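
/*
 * Iterate over the memory ranges that belong in the dump: every range in
 * memblock.physmem minus the oldmem region (for kdump, the crashkernel
 * area the new kernel is running in; see elfcorehdr_alloc() below).
 */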
#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid)         \
        for (i = 0, __next_mem_range(&i, nid, &memblock.physmem,       \
                                     &oldmem_type, p_start,            \
                                     p_end, p_nid);                    \
             i != (u64)ULLONG_MAX;                                     \
             __next_mem_range(&i, nid, &memblock.physmem,              \
                              &oldmem_type,                            \
                              p_start, p_end, p_nid))

struct dump_save_areas dump_save_areas;

/*
 * Allocate and add a save area for a CPU
 */
struct save_area_ext *dump_save_area_create(int cpu)
{
        struct save_area_ext **save_areas, *save_area;

        save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
        if (!save_area)
                return NULL;
        if (cpu + 1 > dump_save_areas.count) {
                dump_save_areas.count = cpu + 1;
                save_areas = krealloc(dump_save_areas.areas,
                                      dump_save_areas.count * sizeof(void *),
                                      GFP_KERNEL | __GFP_ZERO);
                if (!save_areas) {
                        kfree(save_area);
                        return NULL;
                }
                dump_save_areas.areas = save_areas;
        }
        dump_save_areas.areas[cpu] = save_area;
        return save_area;
}

/*
 * Return physical address for virtual address
 */
static inline void *load_real_addr(void *addr)
{
        unsigned long real_addr;

        asm volatile(
                "       lra     %0,0(%1)\n"     /* Load Real Address */
                "       jz      0f\n"           /* cc 0: translation worked */
                "       la      %0,0\n"         /* no mapping: return NULL */
                "0:"
                : "=a" (real_addr) : "a" (addr) : "cc");
        return (void *)real_addr;
}

/*
 * Copy memory from real storage to a virtual or real destination
 */
static int copy_from_realmem(void *dest, void *src, size_t count)
{
        unsigned long size;

        if (!count)
                return 0;
        if (!is_vmalloc_or_module_addr(dest))
                return memcpy_real(dest, src, count);
        /* vmalloc'ed destination: translate and copy page by page */
        do {
                size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
                if (memcpy_real(load_real_addr(dest), src, size))
                        return -EFAULT;
                count -= size;
                dest += size;
                src += size;
        } while (count);
        return 0;
}

/*
 * Pointer to ELF header in new kernel
 */
static void *elfcorehdr_newmem;
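
/*
 * Background: the zfcpdump kernel is IPLed into the lowest memory, so the
 * original contents of that region (up to the HSA size reported by
 * sclp_get_hsa_size()) are only available from the machine's Hardware
 * System Area, which memcpy_hsa() reads via SCLP. Memory above the HSA
 * size is still intact in real storage.
 */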

/*
 * Copy one page from zfcpdump "oldmem"
 *
 * For pages below the HSA size, memory is copied from the HSA. Otherwise
 * a real memory copy is used.
 */
static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
                                         unsigned long src, int userbuf)
{
        int rc;

        if (src < sclp_get_hsa_size()) {
                rc = memcpy_hsa(buf, src, csize, userbuf);
        } else {
                if (userbuf)
                        rc = copy_to_user_real((void __force __user *) buf,
                                               (void *) src, csize);
                else
                        rc = memcpy_real(buf, (void *) src, csize);
        }
        return rc ? rc : csize;
}

/*
 * Copy one page from kdump "oldmem"
 *
 * For the kdump reserved memory this function performs a swap operation:
 * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
 * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
 */
static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
                                      unsigned long src, int userbuf)
{
        int rc;

        if (src < OLDMEM_SIZE)
                src += OLDMEM_BASE;
        else if (src > OLDMEM_BASE &&
                 src < OLDMEM_BASE + OLDMEM_SIZE)
                src -= OLDMEM_BASE;
        if (userbuf)
                rc = copy_to_user_real((void __force __user *) buf,
                                       (void *) src, csize);
        else
                rc = copy_from_realmem(buf, (void *) src, csize);
        return rc ? rc : csize;
}
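
/*
 * Example (hypothetical values): with OLDMEM_BASE == 0x20000000 and
 * OLDMEM_SIZE == 0x10000000, a read of old-kernel address 0x1000 is
 * served from 0x20001000 (where the old low memory was preserved by the
 * swap), while a read of 0x20001000 is served from 0x1000, the range the
 * crash kernel itself now occupies.
 */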

/*
 * Copy one page from "oldmem"
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
                         unsigned long offset, int userbuf)
{
        unsigned long src;

        if (!csize)
                return 0;
        src = (pfn << PAGE_SHIFT) + offset;
        if (OLDMEM_BASE)
                return copy_oldmem_page_kdump(buf, csize, src, userbuf);
        else
                return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
}

/*
 * Remap "oldmem" for kdump
 *
 * For the kdump reserved memory this function performs a swap operation:
 * [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
 */
static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
                                        unsigned long from, unsigned long pfn,
                                        unsigned long size, pgprot_t prot)
{
        unsigned long size_old;
        int rc;

        if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
                size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
                rc = remap_pfn_range(vma, from,
                                     pfn + (OLDMEM_BASE >> PAGE_SHIFT),
                                     size_old, prot);
                if (rc || size == size_old)
                        return rc;
                size -= size_old;
                from += size_old;
                pfn += size_old >> PAGE_SHIFT;
        }
        return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Remap "oldmem" for zfcpdump
 *
 * We only map available memory above HSA size. Memory below HSA size
 * is read on demand using the copy_oldmem_page() function.
 */
static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
                                           unsigned long from,
                                           unsigned long pfn,
                                           unsigned long size, pgprot_t prot)
{
        unsigned long hsa_end = sclp_get_hsa_size();
        unsigned long size_hsa;

        if (pfn < hsa_end >> PAGE_SHIFT) {
                size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT));
                if (size == size_hsa)
                        return 0;
                size -= size_hsa;
                from += size_hsa;
                pfn += size_hsa >> PAGE_SHIFT;
        }
        return remap_pfn_range(vma, from, pfn, size, prot);
}
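
/*
 * Note: leaving the HSA part of the VMA unmapped is intentional; accesses
 * to it fault, and the /proc/vmcore fault handler then fills the page via
 * the copy_oldmem_page() path.
 */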

/*
 * Remap "oldmem" for kdump or zfcpdump
 */
int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
                           unsigned long pfn, unsigned long size, pgprot_t prot)
{
        if (OLDMEM_BASE)
                return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
                                                       prot);
}

/*
 * Copy memory from old kernel
 */
int copy_from_oldmem(void *dest, void *src, size_t count)
{
        unsigned long copied = 0;
        int rc;

        if (OLDMEM_BASE) {
                if ((unsigned long) src < OLDMEM_SIZE) {
                        copied = min(count, OLDMEM_SIZE - (unsigned long) src);
                        rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
                        if (rc)
                                return rc;
                }
        } else {
                unsigned long hsa_end = sclp_get_hsa_size();

                if ((unsigned long) src < hsa_end) {
                        copied = min(count, hsa_end - (unsigned long) src);
                        rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
                        if (rc)
                                return rc;
                }
        }
        return copy_from_realmem(dest + copied, src + copied, count - copied);
}

/*
 * Alloc memory and panic in case of ENOMEM
 */
static void *kzalloc_panic(int len)
{
        void *rc;

        rc = kzalloc(len, GFP_KERNEL);
        if (!rc)
                panic("s390 kdump kzalloc (%d) failed", len);
        return rc;
}

/*
 * Initialize ELF note
 */
static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
                     const char *name)
{
        Elf64_Nhdr *note;
        u64 len;

        note = (Elf64_Nhdr *)buf;
        note->n_namesz = strlen(name) + 1;
        note->n_descsz = d_len;
        note->n_type = type;
        len = sizeof(Elf64_Nhdr);
        memcpy(buf + len, name, note->n_namesz);
        len = roundup(len + note->n_namesz, 4);
        memcpy(buf + len, desc, note->n_descsz);
        len = roundup(len + note->n_descsz, 4);
        return PTR_ADD(buf, len);
}
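
/*
 * Layout produced by nt_init() (name and desc each padded to 4 bytes):
 *
 *   +---------------------------------------+
 *   | Elf64_Nhdr (n_namesz, n_descsz, n_type)|
 *   | name, NUL-terminated                  |
 *   | desc, d_len bytes                     |
 *   +---------------------------------------+
 *
 * The returned pointer is where the next note starts.
 */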

/*
 * Initialize prstatus note
 */
static void *nt_prstatus(void *ptr, struct save_area *sa)
{
        struct elf_prstatus nt_prstatus;
        static int cpu_nr = 1;

        memset(&nt_prstatus, 0, sizeof(nt_prstatus));
        memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs));
        memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
        memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs));
        nt_prstatus.pr_pid = cpu_nr;
        cpu_nr++;
        return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus),
                       "CORE");
}

/*
 * Initialize fpregset (floating point) note
 */
static void *nt_fpregset(void *ptr, struct save_area *sa)
{
        elf_fpregset_t nt_fpregset;

        memset(&nt_fpregset, 0, sizeof(nt_fpregset));
        memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg));
        memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs));
        return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset),
                       "CORE");
}

/*
 * Initialize timer note
 */
static void *nt_s390_timer(void *ptr, struct save_area *sa)
{
        return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
                       KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize TOD clock comparator note
 */
static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
{
        return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
                       sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize TOD programmable register note
 */
static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
{
        return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
                       sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize control register note
 */
static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
{
        return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
                       sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize prefix register note
 */
static void *nt_s390_prefix(void *ptr, struct save_area *sa)
{
        return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
                       sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize vxrs high note (full 128 bit VX registers 16-31)
 */
static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs)
{
        return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16],
                       16 * sizeof(__vector128), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize vxrs low note (lower halves of VX registers 0-15)
 */
static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
{
        Elf64_Nhdr *note;
        u64 len;
        int i;

        note = (Elf64_Nhdr *)ptr;
        note->n_namesz = strlen(KEXEC_CORE_NOTE_NAME) + 1;
        note->n_descsz = 16 * 8;
        note->n_type = NT_S390_VXRS_LOW;
        len = sizeof(Elf64_Nhdr);
        memcpy(ptr + len, KEXEC_CORE_NOTE_NAME, note->n_namesz);
        len = roundup(len + note->n_namesz, 4);
        ptr += len;
        /* Copy lower halves (bits 64-127) of SIMD registers 0-15 */
        for (i = 0; i < 16; i++) {
                memcpy(ptr, &vx_regs[i].u[2], 8);
                ptr += 8;
        }
        return ptr;
}
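
/*
 * nt_init() cannot be used above because the descriptor is not one
 * contiguous buffer: only the second 8 bytes of each 16-byte __vector128
 * entry belong in this note. The first 8 bytes of registers 0-15 alias
 * the floating point registers and are already covered by NT_PRFPREG.
 */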

/*
 * Fill ELF notes for one CPU with save area registers
 */
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vx_regs)
{
        ptr = nt_prstatus(ptr, sa);
        ptr = nt_fpregset(ptr, sa);
        ptr = nt_s390_timer(ptr, sa);
        ptr = nt_s390_tod_cmp(ptr, sa);
        ptr = nt_s390_tod_preg(ptr, sa);
        ptr = nt_s390_ctrs(ptr, sa);
        ptr = nt_s390_prefix(ptr, sa);
        if (MACHINE_HAS_VX && vx_regs) {
                ptr = nt_s390_vx_low(ptr, vx_regs);
                ptr = nt_s390_vx_high(ptr, vx_regs);
        }
        return ptr;
}

/*
 * Initialize prpsinfo note (new kernel)
 */
static void *nt_prpsinfo(void *ptr)
{
        struct elf_prpsinfo prpsinfo;

        memset(&prpsinfo, 0, sizeof(prpsinfo));
        prpsinfo.pr_sname = 'R';
        strcpy(prpsinfo.pr_fname, "vmlinux");
        return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo),
                       KEXEC_CORE_NOTE_NAME);
}

/*
 * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
 */
static void *get_vmcoreinfo_old(unsigned long *size)
{
        char nt_name[11], *vmcoreinfo;
        Elf64_Nhdr note;
        void *addr;

        if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
                return NULL;
        memset(nt_name, 0, sizeof(nt_name));
        if (copy_from_oldmem(&note, addr, sizeof(note)))
                return NULL;
        if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
                return NULL;
        if (strcmp(nt_name, "VMCOREINFO") != 0)
                return NULL;
        vmcoreinfo = kzalloc_panic(note.n_descsz);
        /*
         * The descriptor starts at offset 24: the 12-byte Elf64_Nhdr plus
         * the name "VMCOREINFO\0" (11 bytes) rounded up to 12.
         */
        if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
                return NULL;
        *size = note.n_descsz;
        return vmcoreinfo;
}

/*
 * Initialize vmcoreinfo note (new kernel)
 */
static void *nt_vmcoreinfo(void *ptr)
{
        unsigned long size;
        void *vmcoreinfo;

        vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
        if (!vmcoreinfo)
                vmcoreinfo = get_vmcoreinfo_old(&size);
        if (!vmcoreinfo)
                return ptr;
        return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
}

/*
 * Initialize ELF header (new kernel)
 */
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
{
        memset(ehdr, 0, sizeof(*ehdr));
        memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
        ehdr->e_ident[EI_CLASS] = ELFCLASS64;
        ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
        ehdr->e_ident[EI_VERSION] = EV_CURRENT;
        memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
        ehdr->e_type = ET_CORE;
        ehdr->e_machine = EM_S390;
        ehdr->e_version = EV_CURRENT;
        ehdr->e_phoff = sizeof(Elf64_Ehdr);
        ehdr->e_ehsize = sizeof(Elf64_Ehdr);
        ehdr->e_phentsize = sizeof(Elf64_Phdr);
        ehdr->e_phnum = mem_chunk_cnt + 1;
        return ehdr + 1;
}
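
/*
 * e_phnum above is one PT_NOTE program header plus one PT_LOAD program
 * header per dumpable memory chunk; the program header table follows the
 * ELF header directly (e_phoff == sizeof(Elf64_Ehdr)).
 */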

/*
 * Return CPU count for ELF header (new kernel)
 */
static int get_cpu_cnt(void)
{
        int i, cpus = 0;

        for (i = 0; i < dump_save_areas.count; i++) {
                /* a zero prefix register marks an unused save area */
                if (dump_save_areas.areas[i]->sa.pref_reg == 0)
                        continue;
                cpus++;
        }
        return cpus;
}

/*
 * Return memory chunk count for ELF header (new kernel)
 */
static int get_mem_chunk_cnt(void)
{
        int cnt = 0;
        u64 idx;

        for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL)
                cnt++;
        return cnt;
}

/*
 * Initialize ELF loads (new kernel)
 */
static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
{
        phys_addr_t start, end;
        u64 idx;

        for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) {
                phdr->p_filesz = end - start;
                phdr->p_type = PT_LOAD;
                phdr->p_offset = start;
                phdr->p_vaddr = start;
                phdr->p_paddr = start;
                phdr->p_memsz = end - start;
                phdr->p_flags = PF_R | PF_W | PF_X;
                phdr->p_align = PAGE_SIZE;
                phdr++;
        }
}

/*
 * Initialize notes (new kernel)
 */
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
        struct save_area_ext *sa_ext;
        void *ptr_start = ptr;
        int i;

        ptr = nt_prpsinfo(ptr);
        for (i = 0; i < dump_save_areas.count; i++) {
                sa_ext = dump_save_areas.areas[i];
                if (sa_ext->sa.pref_reg == 0)
                        continue;
                ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
        }
        ptr = nt_vmcoreinfo(ptr);
        memset(phdr, 0, sizeof(*phdr));
        phdr->p_type = PT_NOTE;
        phdr->p_offset = notes_offset;
        phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
        phdr->p_memsz = phdr->p_filesz;
        return ptr;
}

/*
 * Create ELF core header (new kernel)
 */
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
        Elf64_Phdr *phdr_notes, *phdr_loads;
        int mem_chunk_cnt;
        void *ptr, *hdr;
        u32 alloc_size;
        u64 hdr_off;

        /* If we are not in kdump or zfcpdump mode return */
        if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
                return 0;
        /* If elfcorehdr= has been passed via cmdline, we use that one */
        if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
                return 0;
        /* If we cannot get HSA size for zfcpdump return error */
        if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size())
                return -ENODEV;
        /* For kdump, exclude previous crashkernel memory */
        if (OLDMEM_BASE) {
                oldmem_region.base = OLDMEM_BASE;
                oldmem_region.size = OLDMEM_SIZE;
                oldmem_type.total_size = OLDMEM_SIZE;
        }
        mem_chunk_cnt = get_mem_chunk_cnt();
        /*
         * 0x1000 covers the ELF header, the PT_NOTE program header and
         * the prpsinfo/vmcoreinfo notes; 0x4a0 is an upper bound for the
         * notes generated per CPU.
         */
        alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
                     mem_chunk_cnt * sizeof(Elf64_Phdr);
        hdr = kzalloc_panic(alloc_size);
        /* Init elf header */
        ptr = ehdr_init(hdr, mem_chunk_cnt);
        /* Init program headers */
        phdr_notes = ptr;
        ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
        phdr_loads = ptr;
        ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
        /* Init notes */
        hdr_off = PTR_DIFF(ptr, hdr);
        ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
        /* Init loads */
        hdr_off = PTR_DIFF(ptr, hdr);
        loads_init(phdr_loads, hdr_off);
        *addr = (unsigned long long) hdr;
        elfcorehdr_newmem = hdr;
        *size = (unsigned long long) hdr_off;
        BUG_ON(elfcorehdr_size > alloc_size);
        return 0;
}
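
/*
 * The buffer built above thus contains, in order: the Elf64_Ehdr, the
 * PT_NOTE program header, one PT_LOAD program header per memory chunk,
 * and the note data itself; hdr_off (returned via *size) is exactly the
 * used portion.
 */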

/*
 * Free ELF core header (new kernel)
 */
void elfcorehdr_free(unsigned long long addr)
{
        if (!elfcorehdr_newmem)
                return;
        kfree((void *)(unsigned long)addr);
}

/*
 * Read from ELF header
 */
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
        void *src = (void *)(unsigned long)*ppos;

        src = elfcorehdr_newmem ? src : src - OLDMEM_BASE;
        memcpy(buf, src, count);
        *ppos += count;
        return count;
}

/*
 * Read from ELF notes data
 */
ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
        void *src = (void *)(unsigned long)*ppos;
        int rc;

        if (elfcorehdr_newmem) {
                memcpy(buf, src, count);
        } else {
                rc = copy_from_oldmem(buf, src, count);
                if (rc)
                        return rc;
        }
        *ppos += count;
        return count;
}