/*
 * aslr.c: kernel address-space layout randomization (KASLR) for the
 * x86 boot decompressor — chooses a randomized physical load address
 * for the decompressed kernel when CONFIG_RANDOMIZE_BASE is enabled.
 */
  1. #include "misc.h"
  2. #ifdef CONFIG_RANDOMIZE_BASE
  3. #include <asm/msr.h>
  4. #include <asm/archrandom.h>
  5. #include <asm/e820.h>
  6. #include <generated/compile.h>
  7. #include <linux/module.h>
  8. #include <linux/uts.h>
  9. #include <linux/utsname.h>
  10. #include <generated/utsrelease.h>
/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

/* i8254 programmable interval timer: I/O ports and read-back command bits. */
#define I8254_PORT_CONTROL	0x43	/* PIT mode/command register */
#define I8254_PORT_COUNTER0	0x40	/* PIT counter 0 data port */
#define I8254_CMD_READBACK	0xC0	/* read-back command */
#define I8254_SELECT_COUNTER0	0x02	/* read-back: select counter 0 */
#define I8254_STATUS_NOTREADY	0x40	/* status: count not yet loaded */
  19. static inline u16 i8254(void)
  20. {
  21. u16 status, timer;
  22. do {
  23. outb(I8254_PORT_CONTROL,
  24. I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
  25. status = inb(I8254_PORT_COUNTER0);
  26. timer = inb(I8254_PORT_COUNTER0);
  27. timer |= inb(I8254_PORT_COUNTER0) << 8;
  28. } while (status & I8254_STATUS_NOTREADY);
  29. return timer;
  30. }
  31. static unsigned long rotate_xor(unsigned long hash, const void *area,
  32. size_t size)
  33. {
  34. size_t i;
  35. unsigned long *ptr = (unsigned long *)area;
  36. for (i = 0; i < size / sizeof(hash); i++) {
  37. /* Rotate by odd number of bits and XOR. */
  38. hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
  39. hash ^= ptr[i];
  40. }
  41. return hash;
  42. }
  43. /* Attempt to create a simple but unpredictable starting entropy. */
  44. static unsigned long get_random_boot(void)
  45. {
  46. unsigned long hash = 0;
  47. hash = rotate_xor(hash, build_str, sizeof(build_str));
  48. hash = rotate_xor(hash, real_mode, sizeof(*real_mode));
  49. return hash;
  50. }
/*
 * Gather entropy for the KASLR slot choice.  Starts from the boot-time
 * hash, then mixes in RDRAND and/or RDTSC when the CPU supports them;
 * the i8254 PIT counter is used only when neither hardware source
 * contributed.  Finally diffuses the bits with a circular multiply.
 */
static unsigned long get_random_long(void)
{
#ifdef CONFIG_X86_64
	/* Arbitrary odd mixing constant, one per word size. */
	const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
#else
	const unsigned long mix_const = 0x3f39e593UL;
#endif
	unsigned long raw, random = get_random_boot();
	bool use_i8254 = true;

	debug_putstr("KASLR using");

	if (has_cpuflag(X86_FEATURE_RDRAND)) {
		debug_putstr(" RDRAND");
		/* rdrand_long() can fail; only credit it on success. */
		if (rdrand_long(&raw)) {
			random ^= raw;
			use_i8254 = false;
		}
	}

	if (has_cpuflag(X86_FEATURE_TSC)) {
		debug_putstr(" RDTSC");
		rdtscll(raw);
		random ^= raw;
		use_i8254 = false;
	}

	/* Fallback timer entropy when no hardware source was mixed in. */
	if (use_i8254) {
		debug_putstr(" i8254");
		random ^= i8254();
	}

	/* Circular multiply for better bit diffusion */
	asm("mul %3"
	    : "=a" (random), "=d" (raw)
	    : "a" (random), "rm" (mix_const));
	random += raw;	/* fold the high half of the product back in */

	debug_putstr("...\n");

	return random;
}
  86. struct mem_vector {
  87. unsigned long start;
  88. unsigned long size;
  89. };
  90. #define MEM_AVOID_MAX 5
  91. static struct mem_vector mem_avoid[MEM_AVOID_MAX];
  92. static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
  93. {
  94. /* Item at least partially before region. */
  95. if (item->start < region->start)
  96. return false;
  97. /* Item at least partially after region. */
  98. if (item->start + item->size > region->start + region->size)
  99. return false;
  100. return true;
  101. }
  102. static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
  103. {
  104. /* Item one is entirely before item two. */
  105. if (one->start + one->size <= two->start)
  106. return false;
  107. /* Item one is entirely after item two. */
  108. if (one->start >= two->start + two->size)
  109. return false;
  110. return true;
  111. }
/*
 * Record the memory regions a randomized kernel must not overlap:
 *  [0] the zone at the end of the input that in-place decompression
 *      would clobber,
 *  [1] the initrd,
 *  [2] the kernel command line,
 *  [3] the decompressor heap,
 *  [4] the decompressor stack.
 */
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output, unsigned long output_size)
{
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	unsigned long unsafe, unsafe_len;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression (see calculations at top of misc.c).
	 */
	unsafe_len = (output_size >> 12) + 32768 + 18;
	unsafe = (unsigned long)input + input_size - unsafe_len;
	mem_avoid[0].start = unsafe;
	mem_avoid[0].size = unsafe_len;

	/* Avoid initrd: 64-bit address/size split across two header fields. */
	initrd_start = (u64)real_mode->ext_ramdisk_image << 32;
	initrd_start |= real_mode->hdr.ramdisk_image;
	initrd_size = (u64)real_mode->ext_ramdisk_size << 32;
	initrd_size |= real_mode->hdr.ramdisk_size;
	mem_avoid[1].start = initrd_start;
	mem_avoid[1].size = initrd_size;

	/* Avoid kernel command line. */
	cmd_line = (u64)real_mode->ext_cmd_line_ptr << 32;
	cmd_line |= real_mode->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line: strlen plus the NUL terminator. */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
		;
	mem_avoid[2].start = cmd_line;
	mem_avoid[2].size = cmd_line_size;

	/* Avoid heap memory. */
	mem_avoid[3].start = (unsigned long)free_mem_ptr;
	mem_avoid[3].size = BOOT_HEAP_SIZE;

	/*
	 * Avoid stack memory.
	 * NOTE(review): assumes the boot stack begins at free_mem_end_ptr
	 * (i.e. immediately after the heap) — confirm against the
	 * heap/stack layout set up in misc.c / head_*.S.
	 */
	mem_avoid[4].start = (unsigned long)free_mem_end_ptr;
	mem_avoid[4].size = BOOT_STACK_SIZE;
}
  150. /* Does this memory vector overlap a known avoided area? */
  151. static bool mem_avoid_overlap(struct mem_vector *img)
  152. {
  153. int i;
  154. for (i = 0; i < MEM_AVOID_MAX; i++) {
  155. if (mem_overlaps(img, &mem_avoid[i]))
  156. return true;
  157. }
  158. return false;
  159. }
  160. static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
  161. CONFIG_PHYSICAL_ALIGN];
  162. static unsigned long slot_max;
  163. static void slots_append(unsigned long addr)
  164. {
  165. /* Overflowing the slots list should be impossible. */
  166. if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
  167. CONFIG_PHYSICAL_ALIGN)
  168. return;
  169. slots[slot_max++] = addr;
  170. }
  171. static unsigned long slots_fetch_random(void)
  172. {
  173. /* Handle case of no slots stored. */
  174. if (slot_max == 0)
  175. return 0;
  176. return slots[get_random_long() % slot_max];
  177. }
/*
 * Append every CONFIG_PHYSICAL_ALIGN-aligned slot within one e820 RAM
 * entry where an image of @image_size bytes would fit, honoring
 * @minimum, the global KASLR offset cap, and the mem_avoid[] list.
 */
static void process_e820_entry(struct e820entry *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, img;

	/* Skip non-RAM entries. */
	if (entry->type != E820_RAM)
		return;

	/* Ignore entries entirely above our maximum. */
	if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->addr + entry->size < minimum)
		return;

	region.start = entry->addr;
	region.size = entry->size;

	/* Potentially raise address to minimum location. */
	if (region.start < minimum)
		region.start = minimum;

	/* Potentially raise address to meet alignment requirements. */
	region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

	/* Did we raise the address above the bounds of this e820 region? */
	if (region.start > entry->addr + entry->size)
		return;

	/* Reduce size by any delta from the original address. */
	region.size -= region.start - entry->addr;

	/* Reduce maximum size to fit end of image within maximum limit. */
	if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
		region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start;

	/* Walk each aligned slot and check for avoided areas. */
	for (img.start = region.start, img.size = image_size ;
	     mem_contains(&region, &img) ;
	     img.start += CONFIG_PHYSICAL_ALIGN) {
		if (mem_avoid_overlap(&img))
			continue;
		slots_append(img.start);
	}
}
  216. static unsigned long find_random_addr(unsigned long minimum,
  217. unsigned long size)
  218. {
  219. int i;
  220. unsigned long addr;
  221. /* Make sure minimum is aligned. */
  222. minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
  223. /* Verify potential e820 positions, appending to slots list. */
  224. for (i = 0; i < real_mode->e820_entries; i++) {
  225. process_e820_entry(&real_mode->e820_map[i], minimum, size);
  226. }
  227. return slots_fetch_random();
  228. }
  229. unsigned char *choose_kernel_location(unsigned char *input,
  230. unsigned long input_size,
  231. unsigned char *output,
  232. unsigned long output_size)
  233. {
  234. unsigned long choice = (unsigned long)output;
  235. unsigned long random;
  236. if (cmdline_find_option_bool("nokaslr")) {
  237. debug_putstr("KASLR disabled...\n");
  238. goto out;
  239. }
  240. /* Record the various known unsafe memory ranges. */
  241. mem_avoid_init((unsigned long)input, input_size,
  242. (unsigned long)output, output_size);
  243. /* Walk e820 and find a random address. */
  244. random = find_random_addr(choice, output_size);
  245. if (!random) {
  246. debug_putstr("KASLR could not find suitable E820 region...\n");
  247. goto out;
  248. }
  249. /* Always enforce the minimum. */
  250. if (random < choice)
  251. goto out;
  252. choice = random;
  253. out:
  254. return (unsigned char *)choice;
  255. }
  256. #endif /* CONFIG_RANDOMIZE_BASE */