/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */
/*
 * isspace() in linux/ctype.h is expected by next_arg() to filter
 * out "space/lf/tab". However, boot/ctype.h conflicts with
 * linux/ctype.h, since isdigit() is implemented in both of them.
 * Hence disable boot/ctype.h here.
 */
#define BOOT_CTYPE_H
/*
 * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
 * However, both lib/ctype.c and lib/cmdline.c pull in EXPORT_SYMBOL,
 * which is meaningless in this context and will cause a compile error
 * in some cases. So do not include linux/export.h and define
 * EXPORT_SYMBOL(sym) as empty.
 */
#define _LINUX_EXPORT_H
#define EXPORT_SYMBOL(sym)
#include "misc.h"
#include "error.h"
#include "../string.h"

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <generated/utsrelease.h>

/* Macros used by the included decompressor code below. */
#define STATIC
#include <linux/decompress/mm.h>

extern unsigned long get_cmd_line_ptr(void);

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
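
/*
 * rotate_xor() folds an arbitrary memory area into @hash one
 * unsigned long at a time. On a 64-bit build the shift pair below is
 * simply a right-rotation by 7 bits, i.e. (hash << 57) | (hash >> 7);
 * any trailing bytes beyond a multiple of sizeof(unsigned long) are
 * ignored.
 */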
static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

	return hash;
}

#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"

struct mem_vector {
	unsigned long long start;
	unsigned long long size;
};

/* Only supporting at most 4 unusable memmap regions with kaslr */
#define MAX_MEMMAP_REGIONS 4

static bool memmap_too_large;

/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
unsigned long long mem_limit = ULLONG_MAX;

enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MEMMAP_BEGIN,
	MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
	MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];
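
/*
 * Regions are treated as half-open intervals [start, start + size).
 * For example, [0x100000, 0x200000) and [0x1ff000, 0x300000) overlap,
 * while [0x100000, 0x200000) and [0x200000, 0x300000) do not.
 */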
static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}

char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"
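
/*
 * parse_memmap() only needs to recognize enough of the memmap= syntax
 * to know which regions to avoid and which limit to honor, e.g.:
 *
 *   memmap=64K$0x12340000 : 64K at 0x12340000 is added to mem_avoid
 *   memmap=nn#ss, memmap=nn!ss : likewise treated as regions to avoid
 *   memmap=nn@ss : usable RAM, ignored here (size is forced to 0)
 *   memmap=512M : no offset, so it acts like mem=512M and caps the
 *                 highest address KASLR may choose
 */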
static int
parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
{
	char *oldp;

	if (!p)
		return -EINVAL;

	/* We don't care about this option here */
	if (!strncmp(p, "exactmap", 8))
		return -EINVAL;

	oldp = p;
	*size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	switch (*p) {
	case '#':
	case '$':
	case '!':
		*start = memparse(p + 1, &p);
		return 0;
	case '@':
		/* memmap=nn@ss specifies usable region, should be skipped */
		*size = 0;
		/* Fall through */
	default:
		/*
		 * If no offset is given and only a size is specified,
		 * memmap=nn[KMG] behaves like mem=nn[KMG]: it limits the
		 * maximum address the system can use. Regions above the
		 * limit must be avoided.
		 */
		*start = 0;
		return 0;
	}

	return -EINVAL;
}

static void mem_avoid_memmap(char *str)
{
	static int i;
	int rc;

	if (i >= MAX_MEMMAP_REGIONS)
		return;

	while (str && (i < MAX_MEMMAP_REGIONS)) {
		int rc;
		unsigned long long start, size;
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;

		rc = parse_memmap(str, &start, &size);
		if (rc < 0)
			break;
		str = k;

		if (start == 0) {
			/* Store the specified memory limit if size > 0 */
			if (size > 0)
				mem_limit = size;

			continue;
		}

		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
		i++;
	}

	/* More than 4 memmaps, fail kaslr */
	if ((i >= MAX_MEMMAP_REGIONS) && str)
		memmap_too_large = true;
}

static int handle_mem_memmap(void)
{
	char *args = (char *)get_cmd_line_ptr();
	size_t len = strlen((char *)args);
	char *tmp_cmdline;
	char *param, *val;
	u64 mem_size;

	if (!strstr(args, "memmap=") && !strstr(args, "mem="))
		return 0;

	tmp_cmdline = malloc(len + 1);
	if (!tmp_cmdline)
		error("Failed to allocate space for tmp_cmdline");

	memcpy(tmp_cmdline, args, len);
	tmp_cmdline[len] = 0;
	args = tmp_cmdline;

	/* Chew leading spaces */
	args = skip_spaces(args);

	while (*args) {
		args = next_arg(args, &param, &val);
		/* Stop at -- */
		if (!val && strcmp(param, "--") == 0) {
			warn("Only '--' specified in cmdline");
			free(tmp_cmdline);
			return -1;
		}

		if (!strcmp(param, "memmap")) {
			mem_avoid_memmap(val);
		} else if (!strcmp(param, "mem")) {
			char *p = val;

			if (!strcmp(p, "nopentium"))
				continue;
			mem_size = memparse(p, &p);
			if (mem_size == 0) {
				free(tmp_cmdline);
				return -EINVAL;
			}
			mem_limit = mem_size;
		}
	}

	free(tmp_cmdline);
	return 0;
}

/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is not obvious how to avoid is the range of memory that is used
 * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
 * the compressed kernel (ZO) and its run space, which is used to extract
 * the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc of ZO are positioned more
 * easily.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 * - input + input_size >= output + output_size
 * - kernel_total_size <= init_size
 * - kernel_total_size <= output_size (see Note below)
 * - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram is showing a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input           input+input_size       output+init_size
 * |     |                 |                             |             |
 * |     |                 |                             |             |
 * |-----|--------|--------|--------------|-----------|--|-------------|
 *                |                       |           |
 *                |                       |           |
 * output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code. (i.e. the range
 * covered backwards of size ZO_INIT_SIZE, starting from output+init_size.)
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since it
 * performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression.
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
	add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
			 mem_avoid[MEM_AVOID_ZO_RANGE].size);

	/* Avoid initrd. */
	initrd_start = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No need to set mapping for initrd, it will be handled in VO. */

	/* Avoid kernel command line. */
	cmd_line = (u64)boot_params->ext_cmd_line_ptr << 32;
	cmd_line |= boot_params->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line. */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
		;
	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
	add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
			 mem_avoid[MEM_AVOID_CMDLINE].size);

	/* Avoid boot parameters. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
	add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
			 mem_avoid[MEM_AVOID_BOOTPARAMS].size);

	/* We don't need to set a mapping for setup_data. */

	/* Mark the memmap regions we need to avoid */
	handle_mem_memmap();

#ifdef CONFIG_X86_VERBOSE_BOOTUP
	/* Make sure video RAM can be used. */
	add_identity_map(0, PMD_SIZE);
#endif
}

/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
			      struct mem_vector *overlap)
{
	int i;
	struct setup_data *ptr;
	unsigned long earliest = img->start + img->size;
	bool is_overlapping = false;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]) &&
		    mem_avoid[i].start < earliest) {
			*overlap = mem_avoid[i];
			earliest = overlap->start;
			is_overlapping = true;
		}
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
			*overlap = avoid;
			earliest = overlap->start;
			is_overlapping = true;
		}

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return is_overlapping;
}

struct slot_area {
	unsigned long addr;
	int num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];

static unsigned long slot_max;

static unsigned long slot_area_index;
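
/*
 * Each stored area contributes (size - image_size) / CONFIG_PHYSICAL_ALIGN + 1
 * candidate start addresses. For example, with CONFIG_PHYSICAL_ALIGN = 2M,
 * an 8M region holding a 4M image yields (8M - 4M) / 2M + 1 = 3 slots:
 * the image may start at offset 0M, 2M or 4M within the region.
 */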
static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

	slot_area.addr = region->start;
	slot_area.num = (region->size - image_size) /
			CONFIG_PHYSICAL_ALIGN + 1;

	if (slot_area.num > 0) {
		slot_areas[slot_area_index++] = slot_area;
		slot_max += slot_area.num;
	}
}
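
/*
 * The random slot index is drawn across all areas at once and then
 * walked down: say slot_areas[] holds {addr = 16M, num = 3} and
 * {addr = 64M, num = 2} with CONFIG_PHYSICAL_ALIGN = 2M. A draw of 4
 * skips the first area (4 - 3 = 1) and resolves to 64M + 1 * 2M = 66M.
 */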
static unsigned long slots_fetch_random(void)
{
	unsigned long slot;
	int i;

	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	slot = kaslr_get_random_long("Physical") % slot_max;

	for (i = 0; i < slot_area_index; i++) {
		if (slot >= slot_areas[i].num) {
			slot -= slot_areas[i].num;
			continue;
		}
		return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
	}

	if (i == slot_area_index)
		debug_putstr("slots_fetch_random() failed!?\n");
	return 0;
}
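
/*
 * A usable e820 entry may be chopped into several slot areas around the
 * avoided ranges. For example, a RAM entry covering [100M, 200M) with an
 * avoided region at [150M, 160M) first stores [100M, 150M) (provided it
 * can hold image_size), then clips the region to [160M, 200M) and loops
 * to try the remainder.
 */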
static void process_e820_entry(struct boot_e820_entry *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, overlap;
	struct slot_area slot_area;
	unsigned long start_orig, end;
	struct boot_e820_entry cur_entry;

	/* Skip non-RAM entries. */
	if (entry->type != E820_TYPE_RAM)
		return;

	/* On 32-bit, ignore entries entirely above our maximum. */
	if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= KERNEL_IMAGE_SIZE)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->addr + entry->size < minimum)
		return;

	/* Ignore entries above memory limit */
	end = min(entry->size + entry->addr, mem_limit);
	if (entry->addr >= end)
		return;
	cur_entry.addr = entry->addr;
	cur_entry.size = end - entry->addr;

	region.start = cur_entry.addr;
	region.size = cur_entry.size;

	/* Give up if slot area array is full. */
	while (slot_area_index < MAX_SLOT_AREA) {
		start_orig = region.start;

		/* Potentially raise address to minimum location. */
		if (region.start < minimum)
			region.start = minimum;

		/* Potentially raise address to meet alignment needs. */
		region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

		/* Did we raise the address above this e820 region? */
		if (region.start > cur_entry.addr + cur_entry.size)
			return;

		/* Reduce size by any delta from the original address. */
		region.size -= region.start - start_orig;

		/* On 32-bit, reduce region size to fit within max size. */
		if (IS_ENABLED(CONFIG_X86_32) &&
		    region.start + region.size > KERNEL_IMAGE_SIZE)
			region.size = KERNEL_IMAGE_SIZE - region.start;

		/* Return if region can't contain decompressed kernel */
		if (region.size < image_size)
			return;

		/* If nothing overlaps, store the region and return. */
		if (!mem_avoid_overlap(&region, &overlap)) {
			store_slot_info(&region, image_size);
			return;
		}

		/* Store beginning of region if holds at least image_size. */
		if (overlap.start > region.start + image_size) {
			struct mem_vector beginning;

			beginning.start = region.start;
			beginning.size = overlap.start - region.start;
			store_slot_info(&beginning, image_size);
		}

		/* Return if overlap extends to or past end of region. */
		if (overlap.start + overlap.size >= region.start + region.size)
			return;

		/* Clip off the overlapping region and start over. */
		region.size -= overlap.start - region.start + overlap.size;
		region.start = overlap.start + overlap.size;
	}
}

static unsigned long find_random_phys_addr(unsigned long minimum,
					   unsigned long image_size)
{
	int i;
	unsigned long addr;

	/* Check if we had too many memmaps. */
	if (memmap_too_large) {
		debug_putstr("Aborted e820 scan (more than 4 memmap= args)!\n");
		return 0;
	}

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < boot_params->e820_entries; i++) {
		process_e820_entry(&boot_params->e820_table[i], minimum,
				   image_size);
		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted e820 scan (slot_areas full)!\n");
			break;
		}
	}

	return slots_fetch_random();
}
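
/*
 * For example, with CONFIG_PHYSICAL_ALIGN = 2M, KERNEL_IMAGE_SIZE = 1G,
 * a minimum of 16M (LOAD_PHYSICAL_ADDR) and an image_size that aligns
 * up to 32M, the randomized virtual address is drawn from
 * (1024M - 16M - 32M) / 2M + 1 = 489 possible slots.
 */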
static unsigned long find_random_virt_addr(unsigned long minimum,
					   unsigned long image_size)
{
	unsigned long slots, random_addr;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
	/* Align image_size for easy slot calculations. */
	image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);

	/*
	 * How many CONFIG_PHYSICAL_ALIGN-sized slots can hold image_size
	 * within the range of minimum to KERNEL_IMAGE_SIZE?
	 */
	slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
		 CONFIG_PHYSICAL_ALIGN + 1;

	random_addr = kaslr_get_random_long("Virtual") % slots;

	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
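
/*
 * choose_random_location() is the entry point used by the decompressor:
 * it updates *output with a randomized physical load address and
 * *virt_addr with a randomized virtual address; both are left at the
 * caller's defaults when "nokaslr" is given on the command line.
 */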
/*
 * Since this function examines addresses much more numerically,
 * it takes the input and output pointers as 'unsigned long'.
 */
void choose_random_location(unsigned long input,
			    unsigned long input_size,
			    unsigned long *output,
			    unsigned long output_size,
			    unsigned long *virt_addr)
{
	unsigned long random_addr, min_addr;

	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		return;
	}

	boot_params->hdr.loadflags |= KASLR_FLAG;

	/* Prepare to add new identity pagetables on demand. */
	initialize_identity_maps();

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, *output);

	/*
	 * Low end of the randomization range should be the
	 * smaller of 512M or the initial kernel image
	 * location:
	 */
	min_addr = min(*output, 512UL << 20);

	/* Walk e820 and find a random address. */
	random_addr = find_random_phys_addr(min_addr, output_size);
	if (!random_addr) {
		warn("Physical KASLR disabled: no suitable memory region!");
	} else {
		/* Update the new physical address location. */
		if (*output != random_addr) {
			add_identity_map(random_addr, output_size);
			*output = random_addr;
		}

		/*
		 * This loads the identity mapping page table.
		 * This should only be done if a new physical address
		 * is found for the kernel, otherwise we should keep
		 * the old page table so it behaves like the "nokaslr"
		 * case.
		 */
		finalize_identity_maps();
	}

	/* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
	if (IS_ENABLED(CONFIG_X86_64))
		random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
	*virt_addr = random_addr;
}