@@ -38,19 +38,6 @@
 #include <asm/virtext.h>
 #include <asm/intel_pt.h>
 
-/* Alignment required for elf header segment */
-#define ELF_CORE_HEADER_ALIGN 4096
-
-struct crash_mem_range {
-	u64 start, end;
-};
-
-struct crash_mem {
-	unsigned int max_nr_ranges;
-	unsigned int nr_ranges;
-	struct crash_mem_range ranges[0];
-};
-
 /* Used while preparing memory map entries for second kernel */
 struct crash_memmap_data {
 	struct boot_params *params;
@@ -227,77 +214,6 @@ static struct crash_mem *fill_up_crash_elf_data(void)
 	return cmem;
 }
 
-static int exclude_mem_range(struct crash_mem *mem,
-		unsigned long long mstart, unsigned long long mend)
-{
-	int i, j;
-	unsigned long long start, end;
-	struct crash_mem_range temp_range = {0, 0};
-
-	for (i = 0; i < mem->nr_ranges; i++) {
-		start = mem->ranges[i].start;
-		end = mem->ranges[i].end;
-
-		if (mstart > end || mend < start)
-			continue;
-
-		/* Truncate any area outside of range */
-		if (mstart < start)
-			mstart = start;
-		if (mend > end)
-			mend = end;
-
-		/* Found completely overlapping range */
-		if (mstart == start && mend == end) {
-			mem->ranges[i].start = 0;
-			mem->ranges[i].end = 0;
-			if (i < mem->nr_ranges - 1) {
-				/* Shift rest of the ranges to left */
-				for (j = i; j < mem->nr_ranges - 1; j++) {
-					mem->ranges[j].start =
-						mem->ranges[j+1].start;
-					mem->ranges[j].end =
-						mem->ranges[j+1].end;
-				}
-			}
-			mem->nr_ranges--;
-			return 0;
-		}
-
-		if (mstart > start && mend < end) {
-			/* Split original range */
-			mem->ranges[i].end = mstart - 1;
-			temp_range.start = mend + 1;
-			temp_range.end = end;
-		} else if (mstart != start)
-			mem->ranges[i].end = mstart - 1;
-		else
-			mem->ranges[i].start = mend + 1;
-		break;
-	}
-
-	/* If a split happend, add the split to array */
-	if (!temp_range.end)
-		return 0;
-
-	/* Split happened */
-	if (i == mem->max_nr_ranges - 1)
-		return -ENOMEM;
-
-	/* Location where new range should go */
-	j = i + 1;
-	if (j < mem->nr_ranges) {
-		/* Move over all ranges one slot towards the end */
-		for (i = mem->nr_ranges - 1; i >= j; i--)
-			mem->ranges[i + 1] = mem->ranges[i];
-	}
-
-	mem->ranges[j].start = temp_range.start;
-	mem->ranges[j].end = temp_range.end;
-	mem->nr_ranges++;
-	return 0;
-}
-
 /*
  * Look for any unwanted ranges between mstart, mend and remove them. This
  * might lead to split and split ranges are put in cmem->ranges[] array
@@ -307,12 +223,13 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem)
 	int ret = 0;
 
 	/* Exclude crashkernel region */
-	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
 	if (ret)
 		return ret;
 
 	if (crashk_low_res.end) {
-		ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
+					      crashk_low_res.end);
 		if (ret)
 			return ret;
 	}
@@ -331,105 +248,6 @@ static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
 	return 0;
 }
 
-static int prepare_elf64_headers(struct crash_mem *cmem, bool kernel_map,
-				 void **addr, unsigned long *sz)
-{
-	Elf64_Ehdr *ehdr;
-	Elf64_Phdr *phdr;
-	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
-	unsigned char *buf;
-	unsigned int cpu, i;
-	unsigned long long notes_addr;
-	unsigned long mstart, mend;
-
-	/* extra phdr for vmcoreinfo elf note */
-	nr_phdr = nr_cpus + 1;
-	nr_phdr += cmem->nr_ranges;
-
-	/*
-	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
-	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
-	 * I think this is required by tools like gdb. So same physical
-	 * memory will be mapped in two elf headers. One will contain kernel
-	 * text virtual addresses and other will have __va(physical) addresses.
-	 */
-
-	nr_phdr++;
-	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
-	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
-
-	buf = vzalloc(elf_sz);
-	if (!buf)
-		return -ENOMEM;
-
-	ehdr = (Elf64_Ehdr *)buf;
-	phdr = (Elf64_Phdr *)(ehdr + 1);
-	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
-	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
-	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
-	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
-	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
-	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
-	ehdr->e_type = ET_CORE;
-	ehdr->e_machine = ELF_ARCH;
-	ehdr->e_version = EV_CURRENT;
-	ehdr->e_phoff = sizeof(Elf64_Ehdr);
-	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
-	ehdr->e_phentsize = sizeof(Elf64_Phdr);
-
-	/* Prepare one phdr of type PT_NOTE for each present cpu */
-	for_each_present_cpu(cpu) {
-		phdr->p_type = PT_NOTE;
-		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
-		phdr->p_offset = phdr->p_paddr = notes_addr;
-		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
-		(ehdr->e_phnum)++;
-		phdr++;
-	}
-
-	/* Prepare one PT_NOTE header for vmcoreinfo */
-	phdr->p_type = PT_NOTE;
-	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
-	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
-	(ehdr->e_phnum)++;
-	phdr++;
-
-	/* Prepare PT_LOAD type program header for kernel text region */
-	if (kernel_map) {
-		phdr->p_type = PT_LOAD;
-		phdr->p_flags = PF_R|PF_W|PF_X;
-		phdr->p_vaddr = (Elf64_Addr)_text;
-		phdr->p_filesz = phdr->p_memsz = _end - _text;
-		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
-		ehdr->e_phnum++;
-		phdr++;
-	}
-
-	/* Go through all the ranges in cmem->ranges[] and prepare phdr */
-	for (i = 0; i < cmem->nr_ranges; i++) {
-		mstart = cmem->ranges[i].start;
-		mend = cmem->ranges[i].end;
-
-		phdr->p_type = PT_LOAD;
-		phdr->p_flags = PF_R|PF_W|PF_X;
-		phdr->p_offset = mstart;
-
-		phdr->p_paddr = mstart;
-		phdr->p_vaddr = (unsigned long long) __va(mstart);
-		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
-		phdr->p_align = 0;
-		ehdr->e_phnum++;
-		phdr++;
-		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
-			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
-			ehdr->e_phnum, phdr->p_offset);
-	}
-
-	*addr = buf;
-	*sz = elf_sz;
-	return 0;
-}
-
 /* Prepare elf headers. Return addr and size */
 static int prepare_elf_headers(struct kimage *image, void **addr,
 			       unsigned long *sz)
@@ -454,7 +272,8 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
 		goto out;
 
 	/* By default prepare 64bit headers */
-	ret = prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);
+	ret = crash_prepare_elf64_headers(cmem,
+				IS_ENABLED(CONFIG_X86_64), addr, sz);
 	if (ret)
 		goto out;
 
@@ -518,14 +337,14 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
 	/* Exclude Backup region */
 	start = image->arch.backup_load_addr;
 	end = start + image->arch.backup_src_sz - 1;
-	ret = exclude_mem_range(cmem, start, end);
+	ret = crash_exclude_mem_range(cmem, start, end);
 	if (ret)
 		return ret;
 
 	/* Exclude elf header region */
 	start = image->arch.elf_load_addr;
 	end = start + image->arch.elf_headers_sz - 1;
-	return exclude_mem_range(cmem, start, end);
+	return crash_exclude_mem_range(cmem, start, end);
 }
 
 /* Prepare memory map for crash dump kernel */