kasan_init_64.c

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>
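
/*
 * Added overview: KASAN maps each 8 bytes of kernel address space to
 * 1 shadow byte: shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) +
 * KASAN_SHADOW_OFFSET, with a scale shift of 3. A shadow byte of 0
 * means "all 8 bytes accessible". This file builds the x86_64 shadow
 * in two phases: a crude early mapping (kasan_early_init), where every
 * shadow page is the same zero page, and the real one (kasan_init).
 */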
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;

/*
 * This page is used as the early shadow. We don't use empty_zero_page
 * because at early stages stack instrumentation could write some
 * garbage to this page.
 * Later we reuse it as the zero shadow for large ranges of memory
 * that are allowed to be accessed but are not instrumented by kasan
 * (vmalloc/vmemmap ...).
 */
static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
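
/*
 * Added note: map_range() below allocates the real, writable shadow
 * for one range of present physical memory. vmemmap_populate() does
 * the allocation and mapping, using huge pages where it can.
 */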
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        /*
         * end + 1 here is intentional. We check several shadow bytes in
         * advance to slightly speed up the fastpath. In some rare cases
         * we could cross the boundary of the mapped shadow, so we just
         * map some more here.
         */
        return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}
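
/*
 * Unhook the whole [start, end) range at the top (pgd) level. This
 * throws away the early zero shadow so that kasan_init() can rebuild
 * the shadow population from scratch.
 */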
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        for (; start < end; start += PGDIR_SIZE)
                pgd_clear(pgd_offset_k(start));
}
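
/*
 * Point every pgd entry covering the shadow region at the same zero
 * pud. Combined with the tables filled in by kasan_early_init(), every
 * shadow load during early boot resolves to kasan_zero_page.
 */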
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
                                | _KERNPG_TABLE);
                start += PGDIR_SIZE;
        }
}
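
/*
 * Added note: the zero_p*_populate() helpers below wire [addr, end) to
 * the shared read-only zero shadow. At each level, chunks that are
 * aligned and large enough reuse the single static zero table; a
 * partial chunk at either end gets a freshly allocated table so that
 * only its covered entries point at the level below.
 */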
static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
                                unsigned long end)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);

        while (addr + PAGE_SIZE <= end) {
                WARN_ON(!pte_none(*pte));
                set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
                                        | __PAGE_KERNEL_RO));
                addr += PAGE_SIZE;
                pte = pte_offset_kernel(pmd, addr);
        }
        return 0;
}
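
/*
 * Whole, aligned 2M (PMD_SIZE) chunks share kasan_zero_pte; a partial
 * chunk falls through to zero_pte_populate() with a private pte table.
 */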
static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pmd_t *pmd = pmd_offset(pud, addr);

        while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
                WARN_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
                                        | _KERNPG_TABLE));
                addr += PMD_SIZE;
                pmd = pmd_offset(pud, addr);
        }

        if (addr < end) {
                if (pmd_none(*pmd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pte_populate(pmd, addr, end);
        }
        return ret;
}
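
/*
 * zero_pud_populate() and zero_pgd_populate() repeat the same pattern
 * one (and two) levels up: aligned 1G chunks share kasan_zero_pmd and
 * aligned 512G chunks share kasan_zero_pud.
 */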
static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pud_t *pud = pud_offset(pgd, addr);

        while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
                WARN_ON(!pud_none(*pud));
                set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
                                        | _KERNPG_TABLE));
                addr += PUD_SIZE;
                pud = pud_offset(pgd, addr);
        }

        if (addr < end) {
                if (pud_none(*pud)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pmd_populate(pud, addr, end);
        }
        return ret;
}

static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
{
        int ret = 0;
        pgd_t *pgd = pgd_offset_k(addr);

        while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
                WARN_ON(!pgd_none(*pgd));
                set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
                                        | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
                pgd = pgd_offset_k(addr);
        }

        if (addr < end) {
                if (pgd_none(*pgd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pud_populate(pgd, addr, end);
        }
        return ret;
}
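
/*
 * Populate the zero shadow for a whole region, panicking on failure:
 * an instrumented kernel cannot run without its shadow.
 */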
static void __init populate_zero_shadow(const void *start, const void *end)
{
        if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
                panic("kasan: unable to map zero shadow!");
}
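
/*
 * Added note: with inline instrumentation the compiler emits the
 * shadow check itself, so dereferencing a wild pointer (NULL, user
 * space) computes a non-canonical shadow address and raises a general
 * protection fault rather than a page fault. Hook the die notifier to
 * explain such GPFs.
 */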
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                        unsigned long val,
                        void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif
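
/*
 * Called very early in boot, long before kasan_init(). Fill the static
 * zero tables so that pte -> kasan_zero_page (still writable at this
 * stage), pmd -> that pte table, pud -> that pmd table, then hang the
 * whole chain off every shadow pgd of both boot page tables.
 */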
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_zero_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_zero_pud[i] = __pud(pud_val);

        kasan_map_early_shadow(early_level4_pgt);
        kasan_map_early_shadow(init_level4_pgt);
}
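
/*
 * Added note: build the real shadow mapping. The CPU is first switched
 * onto a copy of the boot page tables (early_level4_pgt), so the early
 * zero shadow keeps serving instrumented code while the shadow entries
 * of init_level4_pgt are torn down and repopulated:
 *   - zero shadow for the gap below the direct mapping,
 *   - real shadow for every mapped physical range and for kernel text,
 *   - zero shadow for everything else up to KASAN_SHADOW_END.
 * kasan_zero_page is then wiped, because early instrumentation may
 * have written garbage into it while it was mapped writable; from here
 * on it is mapped read-only and must read as "all accessible".
 * Finally, clearing init_task.kasan_depth (initialized to 1) turns
 * report generation on.
 */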
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif

        memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
        load_cr3(early_level4_pgt);
        __flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

        for (i = 0; i < E820_X_MAX; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }

        populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                        kasan_mem_to_shadow((void *)__START_KERNEL_map));

        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);

        populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

        memset(kasan_zero_page, 0, PAGE_SIZE);

        load_cr3(init_level4_pgt);
        __flush_tlb_all();
        init_task.kasan_depth = 0;

        pr_info("Kernel address sanitizer initialized\n");
}