crash_dump.c
/*
 * S390 kdump implementation
 *
 * Copyright IBM Corp. 2011
 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/crash_dump.h>
#include <asm/lowcore.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
#include <asm/sclp.h>

#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))

static struct memblock_region oldmem_region;

static struct memblock_type oldmem_type = {
	.cnt = 1,
	.max = 1,
	.total_size = 0,
	.regions = &oldmem_region,
};

#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid)		\
	for (i = 0, __next_mem_range(&i, nid, &memblock.physmem,	\
				     &oldmem_type, p_start,		\
				     p_end, p_nid);			\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, &memblock.physmem,		\
			      &oldmem_type,				\
			      p_start, p_end, p_nid))
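
/*
 * The iterator above walks memblock.physmem and skips the ranges covered
 * by oldmem_type (__next_mem_range() yields areas of its first type that
 * are not in its second type). For kdump this is the dumped system's
 * memory minus the crashkernel area; for zfcpdump oldmem_type stays
 * empty, so all of physmem is visited.
 */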
struct dump_save_areas dump_save_areas;

/*
 * Allocate and add a save area for a CPU
 */
struct save_area *dump_save_area_create(int cpu)
{
	struct save_area **save_areas, *save_area;

	save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
	if (!save_area)
		return NULL;
	if (cpu + 1 > dump_save_areas.count) {
		dump_save_areas.count = cpu + 1;
		save_areas = krealloc(dump_save_areas.areas,
				      dump_save_areas.count * sizeof(void *),
				      GFP_KERNEL | __GFP_ZERO);
		if (!save_areas) {
			kfree(save_area);
			return NULL;
		}
		dump_save_areas.areas = save_areas;
	}
	dump_save_areas.areas[cpu] = save_area;
	return save_area;
}

/*
 * Return the real (physical) address for a virtual address, or NULL if
 * no translation exists
 */
static inline void *load_real_addr(void *addr)
{
	unsigned long real_addr;
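
	/*
	 * LRA stores the translated (real) address in %0 and sets
	 * condition code 0 when a translation exists; "jz" then skips
	 * the "la %0,0" that would otherwise turn the result into NULL.
	 */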
	asm volatile(
		" lra %0,0(%1)\n"
		" jz 0f\n"
		" la %0,0\n"
		"0:"
		: "=a" (real_addr) : "a" (addr) : "cc");
	return (void *)real_addr;
}

/*
 * Copy from real memory to a virtual or real destination
 */
static int copy_from_realmem(void *dest, void *src, size_t count)
{
	unsigned long size;

	if (!count)
		return 0;
	if (!is_vmalloc_or_module_addr(dest))
		return memcpy_real(dest, src, count);
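
	/*
	 * A vmalloc'ed destination is only virtually contiguous, so copy
	 * at most up to the next page boundary per iteration and
	 * translate each destination page separately.
	 */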
	do {
		size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
		if (memcpy_real(load_real_addr(dest), src, size))
			return -EFAULT;
		count -= size;
		dest += size;
		src += size;
	} while (count);
	return 0;
}

/*
 * Pointer to ELF header in new kernel
 */
static void *elfcorehdr_newmem;

/*
 * Copy one page from zfcpdump "oldmem"
 *
 * For pages below the HSA size, memory is copied from the HSA.
 * Otherwise a real memory copy is used.
 */
static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
					 unsigned long src, int userbuf)
{
	int rc;

	if (src < sclp_get_hsa_size()) {
		rc = memcpy_hsa(buf, src, csize, userbuf);
	} else {
		if (userbuf)
			rc = copy_to_user_real((void __force __user *) buf,
					       (void *) src, csize);
		else
			rc = memcpy_real(buf, (void *) src, csize);
	}
	return rc ? rc : csize;
}

/*
 * Copy one page from kdump "oldmem"
 *
 * For the kdump reserved memory this function performs a swap operation:
 * - [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0, OLDMEM_SIZE].
 * - [0, OLDMEM_SIZE] is mapped to [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE]
 */
static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
				      unsigned long src, int userbuf)
{
	int rc;

	if (src < OLDMEM_SIZE)
		src += OLDMEM_BASE;
	else if (src > OLDMEM_BASE &&
		 src < OLDMEM_BASE + OLDMEM_SIZE)
		src -= OLDMEM_BASE;
	if (userbuf)
		rc = copy_to_user_real((void __force __user *) buf,
				       (void *) src, csize);
	else
		rc = copy_from_realmem(buf, (void *) src, csize);
	/* Propagate a negative error code; on success report csize bytes */
	return rc ? rc : csize;
}
/*
 * Copy one page from "oldmem"
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
			 unsigned long offset, int userbuf)
{
	unsigned long src;

	if (!csize)
		return 0;
	src = (pfn << PAGE_SHIFT) + offset;
	if (OLDMEM_BASE)
		return copy_oldmem_page_kdump(buf, csize, src, userbuf);
	else
		return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
}

/*
 * Remap "oldmem" for kdump
 *
 * For the kdump reserved memory this function performs a swap operation:
 * [0, OLDMEM_SIZE] is mapped to [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE]
 */
static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
					unsigned long from, unsigned long pfn,
					unsigned long size, pgprot_t prot)
{
	unsigned long size_old;
	int rc;

	if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
		size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
		rc = remap_pfn_range(vma, from,
				     pfn + (OLDMEM_BASE >> PAGE_SHIFT),
				     size_old, prot);
		if (rc || size == size_old)
			return rc;
		size -= size_old;
		from += size_old;
		pfn += size_old >> PAGE_SHIFT;
	}
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Remap "oldmem" for zfcpdump
 *
 * We only map available memory above the HSA size. Memory below the HSA
 * size is read on demand using the copy_oldmem_page() function.
 */
static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
					   unsigned long from,
					   unsigned long pfn,
					   unsigned long size, pgprot_t prot)
{
	unsigned long hsa_end = sclp_get_hsa_size();
	unsigned long size_hsa;
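
	/*
	 * Leave the HSA part of the range unmapped: accesses to it fault
	 * and are served on demand through copy_oldmem_page(), as noted
	 * in the comment above.
	 */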
	if (pfn < hsa_end >> PAGE_SHIFT) {
		size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT));
		if (size == size_hsa)
			return 0;
		size -= size_hsa;
		from += size_hsa;
		pfn += size_hsa >> PAGE_SHIFT;
	}
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Remap "oldmem" for kdump or zfcpdump
 */
int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
			   unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (OLDMEM_BASE)
		return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
						       prot);
}

/*
 * Copy memory from old kernel
 */
int copy_from_oldmem(void *dest, void *src, size_t count)
{
	unsigned long copied = 0;
	int rc;
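
	/*
	 * Copy the leading part that needs special handling first: for
	 * kdump the swapped area below OLDMEM_SIZE, for zfcpdump the
	 * part below the HSA limit. The remainder is a plain real copy.
	 */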
	if (OLDMEM_BASE) {
		if ((unsigned long) src < OLDMEM_SIZE) {
			copied = min(count, OLDMEM_SIZE - (unsigned long) src);
			rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
			if (rc)
				return rc;
		}
	} else {
		unsigned long hsa_end = sclp_get_hsa_size();

		if ((unsigned long) src < hsa_end) {
			copied = min(count, hsa_end - (unsigned long) src);
			rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
			if (rc)
				return rc;
		}
	}
	return copy_from_realmem(dest + copied, src + copied, count - copied);
}

/*
 * Alloc memory and panic in case of ENOMEM
 */
static void *kzalloc_panic(int len)
{
	void *rc;

	rc = kzalloc(len, GFP_KERNEL);
	if (!rc)
		panic("s390 kdump kzalloc (%d) failed", len);
	return rc;
}

/*
 * Initialize ELF note
 */
static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
		     const char *name)
{
	Elf64_Nhdr *note;
	u64 len;

	note = (Elf64_Nhdr *)buf;
	note->n_namesz = strlen(name) + 1;
	note->n_descsz = d_len;
	note->n_type = type;
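
	/*
	 * ELF note layout: the Elf64_Nhdr is followed by the
	 * NUL-terminated name and then the descriptor, each padded to a
	 * 4-byte boundary. Return a pointer just past the note so the
	 * caller can chain the next one.
	 */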
	len = sizeof(Elf64_Nhdr);
	memcpy(buf + len, name, note->n_namesz);
	len = roundup(len + note->n_namesz, 4);
	memcpy(buf + len, desc, note->n_descsz);
	len = roundup(len + note->n_descsz, 4);
	return PTR_ADD(buf, len);
}

/*
 * Initialize prstatus note
 */
static void *nt_prstatus(void *ptr, struct save_area *sa)
{
	struct elf_prstatus nt_prstatus;
	static int cpu_nr = 1;
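
	/*
	 * pr_pid carries a 1-based CPU number, so dump tools can tell
	 * the per-CPU note sets apart.
	 */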
	memset(&nt_prstatus, 0, sizeof(nt_prstatus));
	memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs));
	memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
	memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs));
	nt_prstatus.pr_pid = cpu_nr;
	cpu_nr++;
	return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus),
		       "CORE");
}

/*
 * Initialize fpregset (floating point) note
 */
static void *nt_fpregset(void *ptr, struct save_area *sa)
{
	elf_fpregset_t nt_fpregset;

	memset(&nt_fpregset, 0, sizeof(nt_fpregset));
	memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg));
	memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs));
	return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset),
		       "CORE");
}

/*
 * Initialize timer note
 */
static void *nt_s390_timer(void *ptr, struct save_area *sa)
{
	return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
		       KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize TOD clock comparator note
 */
static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
{
	return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
		       sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize TOD programmable register note
 */
static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
{
	return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
		       sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize control register note
 */
static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
{
	return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
		       sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize prefix register note
 */
static void *nt_s390_prefix(void *ptr, struct save_area *sa)
{
	return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
		       sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
}

/*
 * Fill ELF notes for one CPU with save area registers
 */
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
{
	ptr = nt_prstatus(ptr, sa);
	ptr = nt_fpregset(ptr, sa);
	ptr = nt_s390_timer(ptr, sa);
	ptr = nt_s390_tod_cmp(ptr, sa);
	ptr = nt_s390_tod_preg(ptr, sa);
	ptr = nt_s390_ctrs(ptr, sa);
	ptr = nt_s390_prefix(ptr, sa);
	return ptr;
}

/*
 * Initialize prpsinfo note (new kernel)
 */
static void *nt_prpsinfo(void *ptr)
{
	struct elf_prpsinfo prpsinfo;

	memset(&prpsinfo, 0, sizeof(prpsinfo));
	prpsinfo.pr_sname = 'R';
	strcpy(prpsinfo.pr_fname, "vmlinux");
	return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo),
		       KEXEC_CORE_NOTE_NAME);
}

/*
 * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
 */
static void *get_vmcoreinfo_old(unsigned long *size)
{
	char nt_name[11], *vmcoreinfo;
	Elf64_Nhdr note;
	void *addr;

	if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
		return NULL;
	memset(nt_name, 0, sizeof(nt_name));
	if (copy_from_oldmem(&note, addr, sizeof(note)))
		return NULL;
	if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
		return NULL;
	if (strcmp(nt_name, "VMCOREINFO") != 0)
		return NULL;
	vmcoreinfo = kzalloc_panic(note.n_descsz);
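	/*
	 * The descriptor starts 24 bytes into the note:
	 * sizeof(Elf64_Nhdr) (12) plus the name "VMCOREINFO\0" (11),
	 * rounded up to a 4-byte boundary.
	 */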
	if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
		return NULL;
	*size = note.n_descsz;
	return vmcoreinfo;
}

/*
 * Initialize vmcoreinfo note (new kernel)
 */
static void *nt_vmcoreinfo(void *ptr)
{
	unsigned long size;
	void *vmcoreinfo;

	vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
	if (!vmcoreinfo)
		vmcoreinfo = get_vmcoreinfo_old(&size);
	if (!vmcoreinfo)
		return ptr;
	return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
}

/*
 * Initialize ELF header (new kernel)
 */
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
{
	memset(ehdr, 0, sizeof(*ehdr));
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = EM_S390;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);
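	/* One PT_NOTE header plus one PT_LOAD header per memory chunk */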
	ehdr->e_phnum = mem_chunk_cnt + 1;
	return ehdr + 1;
}

/*
 * Return CPU count for ELF header (new kernel)
 */
static int get_cpu_cnt(void)
{
	int i, cpus = 0;
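
	/*
	 * A zero prefix register marks a save area that was never
	 * filled, i.e. the CPU was presumably not running when the dump
	 * was taken; notes_init() skips the same areas.
	 */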
	for (i = 0; i < dump_save_areas.count; i++) {
		if (dump_save_areas.areas[i]->pref_reg == 0)
			continue;
		cpus++;
	}
	return cpus;
}

/*
 * Return memory chunk count for ELF header (new kernel)
 */
static int get_mem_chunk_cnt(void)
{
	int cnt = 0;
	u64 idx;

	for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL)
		cnt++;
	return cnt;
}

/*
 * Initialize ELF loads (new kernel)
 */
static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
{
	phys_addr_t start, end;
	u64 idx;

	for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) {
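		/*
		 * One PT_LOAD entry per memory chunk. p_offset is seeded
		 * with the physical start address; the generic vmcore
		 * code recalculates the file offsets when it merges the
		 * program headers.
		 */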
		phdr->p_filesz = end - start;
		phdr->p_type = PT_LOAD;
		phdr->p_offset = start;
		phdr->p_vaddr = start;
		phdr->p_paddr = start;
		phdr->p_memsz = end - start;
		phdr->p_flags = PF_R | PF_W | PF_X;
		phdr->p_align = PAGE_SIZE;
		phdr++;
	}
}

/*
 * Initialize notes (new kernel)
 */
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
	struct save_area *sa;
	void *ptr_start = ptr;
	int i;

	ptr = nt_prpsinfo(ptr);
	for (i = 0; i < dump_save_areas.count; i++) {
		sa = dump_save_areas.areas[i];
		if (sa->pref_reg == 0)
			continue;
		ptr = fill_cpu_elf_notes(ptr, sa);
	}
	ptr = nt_vmcoreinfo(ptr);
	memset(phdr, 0, sizeof(*phdr));
	phdr->p_type = PT_NOTE;
	phdr->p_offset = notes_offset;
	phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
	phdr->p_memsz = phdr->p_filesz;
	return ptr;
}

/*
 * Create ELF core header (new kernel)
 */
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	Elf64_Phdr *phdr_notes, *phdr_loads;
	int mem_chunk_cnt;
	void *ptr, *hdr;
	u32 alloc_size;
	u64 hdr_off;

	/* If we are not in kdump or zfcpdump mode, return */
	if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
		return 0;
	/* If elfcorehdr= has been passed via cmdline, we use that one */
	if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
		return 0;
	/* If we cannot get the HSA size for zfcpdump, return an error */
	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size())
		return -ENODEV;
	/* For kdump, exclude the previously reserved crashkernel memory */
	if (OLDMEM_BASE) {
		oldmem_region.base = OLDMEM_BASE;
		oldmem_region.size = OLDMEM_SIZE;
		oldmem_type.total_size = OLDMEM_SIZE;
	}
	mem_chunk_cnt = get_mem_chunk_cnt();
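	/*
	 * Rough upper bound for the header buffer: 0x1000 bytes for the
	 * ELF header, prpsinfo and vmcoreinfo notes, 0x300 bytes of
	 * register notes per CPU, and one Elf64_Phdr per memory chunk.
	 * The BUG_ON() below catches an underestimate.
	 */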
	alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
		mem_chunk_cnt * sizeof(Elf64_Phdr);
	hdr = kzalloc_panic(alloc_size);
	/* Init elf header */
	ptr = ehdr_init(hdr, mem_chunk_cnt);
	/* Init program headers */
	phdr_notes = ptr;
	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
	phdr_loads = ptr;
	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
	/* Init notes */
	hdr_off = PTR_DIFF(ptr, hdr);
	ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
	/* Init loads */
	hdr_off = PTR_DIFF(ptr, hdr);
	loads_init(phdr_loads, hdr_off);
	*addr = (unsigned long long) hdr;
	elfcorehdr_newmem = hdr;
	*size = (unsigned long long) hdr_off;
	BUG_ON(elfcorehdr_size > alloc_size);
	return 0;
}

/*
 * Free ELF core header (new kernel)
 */
void elfcorehdr_free(unsigned long long addr)
{
	if (!elfcorehdr_newmem)
		return;
	kfree((void *)(unsigned long)addr);
}

/*
 * Read from ELF header
 */
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	void *src = (void *)(unsigned long)*ppos;
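
	/*
	 * If the header was not built by this kernel, it was stored in
	 * the crashkernel area by the previous kernel; because of the
	 * kdump address swap it is accessible at *ppos - OLDMEM_BASE.
	 */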
	src = elfcorehdr_newmem ? src : src - OLDMEM_BASE;
	memcpy(buf, src, count);
	*ppos += count;
	return count;
}

/*
 * Read from ELF notes data
 */
ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	void *src = (void *)(unsigned long)*ppos;
	int rc;

	if (elfcorehdr_newmem) {
		memcpy(buf, src, count);
	} else {
		rc = copy_from_oldmem(buf, src, count);
		if (rc)
			return rc;
	}
	*ppos += count;
	return count;
}