/*
 * fs/proc/vmcore.c   Interface for accessing the crash
 *                    dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;

/* Total size of the vmcore file. */
static u64 vmcore_size;

struct proc_dir_entry *proc_vmcore = NULL;
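
/*
 * elfcorehdr_addr is parsed from the "elfcorehdr=" boot parameter that
 * kexec passes to the capture kernel; saved_max_pfn records the highest
 * page frame number of the crashed kernel's memory. Both are provided by
 * the crash-dump support code.
 */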

/*
 * Reads from the oldmem device at the given offset, one page at a time.
 * Returns the number of bytes read, or a negative error code.
 */
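/*
 * copy_oldmem_page() is the architecture-specific primitive that copies
 * up to one page out of the previous kernel's memory image; 'userbuf'
 * indicates whether 'buf' points to user space or kernel space.
 */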
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);
        if (pfn > saved_max_pfn)
                return -EINVAL;

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
                if (tmp < 0)
                        return tmp;
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}

/* Maps a vmcore file offset to the corresponding physical address in memory. */
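/*
 * Each struct vmcore element describes one contiguous chunk: 'offset' is
 * its position within the exported /proc/vmcore file, 'paddr' the
 * physical address the data lives at, and 'size' its length. Entries are
 * kept in ascending file-offset order, so a linear scan finds the owner.
 */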
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
                               struct vmcore **m_ptr)
{
        struct vmcore *m;
        u64 paddr;

        list_for_each_entry(m, vc_list, list) {
                u64 start, end;

                start = m->offset;
                end = m->offset + m->size - 1;
                if (offset >= start && offset <= end) {
                        paddr = m->paddr + offset - start;
                        *m_ptr = m;
                        return paddr;
                }
        }
        *m_ptr = NULL;
        return 0;
}

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
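/*
 * The first elfcorebuf_sz bytes of the file are served from the in-memory
 * copy of the (rewritten) ELF headers; every offset past that is mapped
 * through vmcore_list to a physical address and fetched from the old
 * kernel's memory via read_from_oldmem().
 */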
static ssize_t read_vmcore(struct file *file, char __user *buffer,
                           size_t buflen, loff_t *fpos)
{
        ssize_t acc = 0, tmp;
        size_t tsz, nr_bytes;
        u64 start;
        struct vmcore *curr_m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = elfcorebuf_sz - *fpos;
                if (buflen < tsz)
                        tsz = buflen;
                if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
        if (!curr_m)
                return -EINVAL;
        if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                tsz = buflen;

        /* Calculate left bytes in current memory segment. */
        nr_bytes = (curr_m->size - (start - curr_m->paddr));
        if (tsz > nr_bytes)
                tsz = nr_bytes;

        while (buflen) {
                tmp = read_from_oldmem(buffer, tsz, &start, 1);
                if (tmp < 0)
                        return tmp;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* Move on to the next segment once this one is exhausted. */
                if (start >= (curr_m->paddr + curr_m->size)) {
                        if (curr_m->list.next == &vmcore_list)
                                return acc;     /* EOF */
                        curr_m = list_entry(curr_m->list.next,
                                            struct vmcore, list);
                        start = curr_m->paddr;
                }
                if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                        tsz = buflen;

                /* Calculate left bytes in current memory segment. */
                nr_bytes = (curr_m->size - (start - curr_m->paddr));
                if (tsz > nr_bytes)
                        tsz = nr_bytes;
        }

        return acc;
}

static int open_vmcore(struct inode *inode, struct file *filp)
{
        return 0;
}

struct file_operations proc_vmcore_operations = {
        .read   = read_vmcore,
        .open   = open_vmcore,
};
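
/*
 * The /proc/vmcore entry itself is created by the procfs setup code and
 * wired to proc_vmcore_operations; that is why neither the entry pointer
 * above nor this file_operations structure is static.
 */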

static struct vmcore* __init get_new_element(void)
{
        struct vmcore *p;

        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (p)
                memset(p, 0, sizeof(*p));
        return p;
}

static u64 __init get_vmcore_size_elf64(char *elfptr)
{
        int i;
        u64 size;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
        size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++) {
                size += phdr_ptr->p_memsz;
                phdr_ptr++;
        }
        return size;
}

/* Merges all the PT_NOTE headers into one. */
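/*
 * The crash-time ELF headers typically carry one PT_NOTE segment per CPU
 * (each covering that CPU's crash notes). Merging them into a single
 * PT_NOTE keeps the exported layout simple: one note segment right after
 * the program headers, followed by the PT_LOAD data.
 */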
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                           struct list_head *vc_list)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr, *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                int j;
                void *notes_section;
                struct vmcore *new;
                u64 offset, max_sz, sz, real_sz = 0;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                nr_ptnote++;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;

                /* Walk the individual notes; name and descriptor are each
                 * padded to a 4-byte boundary per the ELF specification. */
                for (j = 0; j < max_sz; j += sz) {
                        if (nhdr_ptr->n_namesz == 0)
                                break;
                        sz = sizeof(Elf64_Nhdr) +
                                ((nhdr_ptr->n_namesz + 3) & ~3) +
                                ((nhdr_ptr->n_descsz + 3) & ~3);
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
                }

                /* Add this contiguous chunk of notes section to vmcore list.*/
                new = get_new_element();
                if (!new) {
                        kfree(notes_section);
                        return -ENOMEM;
                }
                /* The crash-time headers are expected to carry the notes'
                 * physical address in p_offset. */
                new->paddr = phdr_ptr->p_offset;
                new->size = real_sz;
                list_add_tail(&new->list, vc_list);
                phdr_sz += real_sz;
                kfree(notes_section);
        }

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type = PT_NOTE;
        phdr.p_flags = 0;
        note_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset = note_off;
        phdr.p_vaddr = phdr.p_paddr = 0;
        phdr.p_filesz = phdr.p_memsz = phdr_sz;
        phdr.p_align = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers.
 */
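/*
 * File offsets are handed out sequentially: the first PT_LOAD chunk
 * starts right after the merged note data, and each subsequent chunk
 * follows the previous one. For example, with 64 KB of notes and two
 * PT_LOAD segments of 512 MB each, the first PT_LOAD begins at
 * sizeof(Elf64_Ehdr) + e_phnum * sizeof(Elf64_Phdr) + 64 KB and the
 * second 512 MB after that.
 */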
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                size_t elfsz,
                                                struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* First program header is PT_NOTE header. */
        vmcore_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
                        phdr_ptr->p_memsz; /* Note sections */

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                /* As with the notes, p_offset in the crash-time headers is
                 * expected to hold the segment's physical address. */
                new->paddr = phdr_ptr->p_offset;
                new->size = phdr_ptr->p_memsz;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off;
                vmcore_off = vmcore_off + phdr_ptr->p_memsz;
        }
        return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
                                                 struct list_head *vc_list)
{
        loff_t vmcore_off;
        Elf64_Ehdr *ehdr_ptr;
        struct vmcore *m;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        /* Skip Elf header and program headers. */
        vmcore_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}
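
/*
 * Reads the ELF64 headers that the crashed kernel left at elfcorehdr_addr,
 * validates them, and rewrites them in elfcorebuf so that all offsets
 * refer to positions within /proc/vmcore instead of old-kernel memory.
 */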
static int __init parse_crash_elf64_headers(void)
{
        int rc = 0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !elf_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                printk(KERN_WARNING "Warning: Core image elf header is not"
                                    " sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
        if (rc < 0) {
                kfree(elfcorebuf);
                return rc;
        }

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
        if (rc) {
                kfree(elfcorebuf);
                return rc;
        }
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                  &vmcore_list);
        if (rc) {
                kfree(elfcorebuf);
                return rc;
        }
        set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
        return 0;
}

static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc = 0;

        addr = elfcorehdr_addr;
        rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                printk(KERN_WARNING "Warning: Core image elf header"
                                    " not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;

                /* Determine vmcore size. */
                vmcore_size = get_vmcore_size_elf64(elfcorebuf);
        } else {
                printk(KERN_WARNING "Warning: Core image elf header is not"
                                    " sane\n");
                return -EINVAL;
        }
        return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* If elfcorehdr= has been passed in cmdline, then capture the dump. */
        if (!(elfcorehdr_addr < ELFCORE_ADDR_MAX))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                printk(KERN_WARNING "Kdump: vmcore not initialized\n");
                return rc;
        }

        /* Initialize /proc/vmcore size if proc is already up. */
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
module_init(vmcore_init)
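
/*
 * A minimal userspace sketch (not part of this file) of how the exported
 * dump is typically captured; it assumes the capture kernel has /proc
 * mounted and a writable /var/crash:
 *
 *      int in = open("/proc/vmcore", O_RDONLY);
 *      int out = open("/var/crash/vmcore", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *      char buf[4096];
 *      ssize_t n;
 *
 *      while ((n = read(in, buf, sizeof(buf))) > 0)
 *              write(out, buf, n);
 *
 * The result is a well-formed ELF64 core that tools such as gdb or crash
 * can open directly.
 */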