kaslr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap) for x86_64. This security feature mitigates
 * exploits relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on PGD & P4D/PUD page table levels to increase possible addresses.
 * The physical memory mapping code was adapted to support P4D/PUD level
 * virtual addresses. On the best configuration, this implementation
 * provides about 30,000 possible virtual addresses on average for each
 * memory region. An additional low memory page is used to ensure each CPU
 * can start with a PGD-aligned virtual address (for realmode).
 *
 * The order of the memory regions is not changed. The feature looks at the
 * available space for the regions based on different configuration options
 * and randomizes the base and the space between each. The size of the
 * physical memory mapping is the available physical memory.
 */
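
/*
 * Illustrative layout sketch: the three randomized regions, in the
 * virtual-address order that KASLR preserves:
 *
 *   page_offset_base -- direct mapping of all physical memory
 *   vmalloc_base     -- vmalloc/ioremap space
 *   vmemmap_base     -- virtual memory map (struct page array)
 *
 * Each base receives a random, PUD-aligned (P4D-aligned with 5-level
 * paging) offset; only the gaps between the regions change, never their
 * relative order.
 */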

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * The end address could depend on more configuration options to make the
 * highest amount of space for randomization available, but that's too hard
 * to keep straight and caused issues already.
 */
static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 0 },
	{ &vmalloc_base, 0 },
	{ &vmemmap_base, 1 },
};
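
/*
 * Note: the sizes of the physical mapping and vmalloc regions above are
 * filled in at runtime by kernel_randomize_memory(); only vmemmap has a
 * fixed size (1 TB) here.
 */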

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}

/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr_start, vaddr;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;

	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 :
					     __PAGE_OFFSET_BASE_L4;
	vaddr = vaddr_start;

	/*
	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
	 * with the vaddr_start/vaddr_end variables. These checks are very
	 * limited....
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	if (!kaslr_memory_enabled())
		return;
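
	/*
	 * Upper bounds for the regions: the physical mapping is sized for
	 * the whole addressable physical space (2^(46 - 40) = 64 TB with
	 * 4-level paging, more with 5-level) and trimmed to the actual
	 * amount of RAM below; vmalloc uses its fixed configured size.
	 */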
	kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
	kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;

	/*
	 * Size the physical memory mapping to the available memory and
	 * add padding if needed (especially for memory hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;

	/* Adapt physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));
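
	/*
	 * Illustrative numbers (assuming 4-level paging, 4 TB of RAM and
	 * the default 10 TB of CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING):
	 * the randomization space spans roughly 117 TB while the regions
	 * consume 14 + 32 + 1 TB of it, leaving about 70 TB of entropy.
	 * Each loop iteration below takes an equal share of the entropy
	 * still unspent, so the first region may shift by up to ~23 TB in
	 * PUD-sized (1 GB) steps.
	 */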
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
		if (pgtable_l5_enabled())
			entropy = (rand % (entropy + 1)) & P4D_MASK;
		else
			entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/*
		 * Jump the region and add a minimum padding based on
		 * randomization alignment.
		 */
		vaddr += get_padding(&kaslr_regions[i]);
		if (pgtable_l5_enabled())
			vaddr = round_up(vaddr + 1, P4D_SIZE);
		else
			vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}
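
/*
 * Build the trampoline's view of the direct mapping at the PUD level:
 * copy the kernel's own PUD entries covering physical memory into a
 * freshly allocated low page, so that the trampoline PGD entry set below
 * spans the direct mapping even when page_offset_base is not PGD aligned.
 */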
static void __meminit init_trampoline_pud(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
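
/* Same as init_trampoline_pud(), but one level up (P4D) for 5-level paging. */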
static void __meminit init_trampoline_p4d(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	p4d_t *p4d_page, *p4d_page_tramp;
	int i;

	p4d_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);

	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
		p4d_t *p4d, *p4d_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
		p4d = p4d_page + p4d_index(vaddr);
		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

		*p4d_tramp = *p4d;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
}

/*
 * Create a PGD-aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consumes only one low memory page.
 */
void __meminit init_trampoline(void)
{
	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	if (pgtable_l5_enabled())
		init_trampoline_p4d();
	else
		init_trampoline_pud();
}