@@ -30,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
+#include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
@@ -37,6 +38,7 @@
 #include <linux/swiotlb.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
+#include <linux/kexec.h>
 
 #include <asm/boot.h>
 #include <asm/fixmap.h>
@@ -77,6 +79,67 @@ static int __init early_initrd(char *p)
 early_param("initrd", early_initrd);
 #endif
 
+#ifdef CONFIG_KEXEC_CORE
+/*
+ * reserve_crashkernel() - reserves memory for crash kernel
+ *
+ * This function reserves the memory area specified by the "crashkernel="
+ * kernel command line parameter. The reserved memory is used by the dump
+ * capture kernel when the primary kernel crashes.
+ */
+static void __init reserve_crashkernel(void)
+{
+	unsigned long long crash_base, crash_size;
+	int ret;
+
+	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+				&crash_size, &crash_base);
+	/* no crashkernel= or invalid value specified */
+	if (ret || !crash_size)
+		return;
+
+	crash_size = PAGE_ALIGN(crash_size);
+
+	if (crash_base == 0) {
+		/* Current arm64 boot protocol requires 2MB alignment */
+		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
+				crash_size, SZ_2M);
+		if (crash_base == 0) {
+			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
+				crash_size);
+			return;
+		}
+	} else {
+		/* User specifies base address explicitly. */
+		if (!memblock_is_region_memory(crash_base, crash_size)) {
+			pr_warn("cannot reserve crashkernel: region is not memory\n");
+			return;
+		}
+
+		if (memblock_is_region_reserved(crash_base, crash_size)) {
+			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
+			return;
+		}
+
+		if (!IS_ALIGNED(crash_base, SZ_2M)) {
+			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
+			return;
+		}
+	}
+	memblock_reserve(crash_base, crash_size);
+
+	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
+		crash_base, crash_base + crash_size, crash_size >> 20);
+
+	crashk_res.start = crash_base;
+	crashk_res.end = crash_base + crash_size - 1;
+}
+#else
+static void __init reserve_crashkernel(void)
+{
+}
+#endif /* CONFIG_KEXEC_CORE */
+
 /*
  * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
  * currently assumes that for memory starting above 4G, 32-bit devices will
@@ -332,6 +395,9 @@ void __init arm64_memblock_init(void)
 		arm64_dma_phys_limit = max_zone_dma_phys();
 	else
 		arm64_dma_phys_limit = PHYS_MASK + 1;
+
+	reserve_crashkernel();
+
 	dma_contiguous_reserve(arm64_dma_phys_limit);
 
 	memblock_allow_resize();
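
For reference, a usage sketch of what this reservation path handles (the "crashkernel=" parameter follows the kernel's standard size[KMG][@offset[KMG]] syntax; the sizes and address below are illustrative only, not taken from this patch):

	crashkernel=256M		# kernel picks a 2MB-aligned base below ARCH_LOW_ADDRESS_LIMIT
	crashkernel=256M@0x60000000	# explicit base: must lie in memory, be unreserved, and be 2MB-aligned

Note that in the explicit-base case reserve_crashkernel() warns and returns rather than relocating the region, so an invalid base leaves kdump disabled for that boot instead of reserving memory somewhere the user did not ask for.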