kasan_init_64.c

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	/*
	 * end + 1 here is intentional. We check several shadow bytes in advance
	 * to slightly speed up fastpath. In some rare cases we could cross
	 * boundary of mapped shadow, so we just map some more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}
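
/*
 * Note: each shadow byte covers 8 bytes of kernel address space, i.e.
 * kasan_mem_to_shadow(addr) is (addr >> KASAN_SHADOW_SCALE_SHIFT) +
 * KASAN_SHADOW_OFFSET with a scale shift of 3, so the real shadow
 * populated for a physical range is 1/8th of that range's size.
 */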

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	for (; start < end; start += PGDIR_SIZE)
		pgd_clear(pgd_offset_k(start));
}

static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
				| _KERNPG_TABLE);
		start += PGDIR_SIZE;
	}
}
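
/*
 * Note: every pgd entry covering the shadow region is pointed at the
 * same kasan_zero_pud above, so during early boot the whole shadow
 * reads as zeroes ("no poison") out of a single shared page hierarchy.
 */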

#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif
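
/*
 * Note: with CONFIG_KASAN_INLINE the compiler emits the shadow checks
 * directly into instrumented code, so a wild access whose shadow is not
 * mapped surfaces as a general protection fault; the notifier above
 * tags such oopses with a hint about the likely cause.
 */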

void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	kasan_map_early_shadow(early_level4_pgt);
	kasan_map_early_shadow(init_level4_pgt);
}
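
/*
 * Note: the loops above alias every slot of each table level to the
 * same zero entry (pte -> kasan_zero_page, pmd -> kasan_zero_pte,
 * pud -> kasan_zero_pmd), so one 4K zero page backs the entire early
 * shadow through shared page tables.
 */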

void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
	load_cr3(early_level4_pgt);
	__flush_tlb_all();
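
	/*
	 * We now run on a copy of the kernel page tables (loaded above), so
	 * the early shadow entries in init_level4_pgt can be torn down and
	 * rebuilt below while instrumented code keeps a valid shadow.
	 */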
	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
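
	/*
	 * Ranges that instrumented code should never touch (here the shadow
	 * of everything below the direct mapping at PAGE_OFFSET) are backed
	 * by the shared zero shadow rather than by real shadow pages.
	 */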
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));
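
	/*
	 * Only RAM actually present in the direct mapping (the pfn_mapped[]
	 * ranges recorded from the e820 map) gets real, writable shadow;
	 * failing to allocate it is fatal.
	 */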
	for (i = 0; i < E820_X_MAX; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
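
	/*
	 * The hole between the end of the direct mapping and the kernel
	 * image mapping at __START_KERNEL_map is never valid memory, so the
	 * zero shadow covers it as well.
	 */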
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));
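
	/*
	 * The kernel image itself gets real shadow: with global
	 * instrumentation KASAN poisons redzones around globals, which
	 * requires the image's shadow to be writable.
	 */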
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);
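
	/*
	 * The gap up to MODULES_END is left unpopulated here: in kernels of
	 * this vintage the module area's shadow is allocated at module load
	 * time (see kasan_module_alloc); past MODULES_END the zero shadow
	 * suffices.
	 */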
	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_level4_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();
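
	/*
	 * init_task starts out with a non-zero kasan_depth so that reports
	 * are suppressed during early boot; clearing it here switches KASAN
	 * reporting on.
	 */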
	init_task.kasan_depth = 0;

	pr_info("KernelAddressSanitizer initialized\n");
}