kasan_init_64.c

#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

extern unsigned char kasan_zero_page[PAGE_SIZE];
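
/*
 * Map real shadow memory for one range of mapped physical pages:
 * translate the pfn range into its shadow address range and back it
 * with freshly allocated pages via vmemmap_populate().
 */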
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        /*
         * end + 1 here is intentional. We check several shadow bytes in
         * advance to slightly speed up the fastpath. In some rare cases
         * we could cross the boundary of the mapped shadow, so we just
         * map some more here.
         */
        return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}
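
/*
 * Drop every kernel PGD entry covering [start, end); used to unmap the
 * early zero shadow before the real shadow layout is built.
 */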
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        for (; start < end; start += PGDIR_SIZE)
                pgd_clear(pgd_offset_k(start));
}
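
/*
 * Early boot: point every PGD entry of the shadow region at the shared
 * kasan_zero_pud, so every shadow load reads zero ("not poisoned")
 * before the real shadow exists.
 */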
void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
                                | _KERNPG_TABLE);
                start += PGDIR_SIZE;
        }
}
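
/*
 * Point every PTE in [addr, end) at the shared, read-only
 * kasan_zero_page.
 */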
static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
                                unsigned long end)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);

        while (addr + PAGE_SIZE <= end) {
                WARN_ON(!pte_none(*pte));
                set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
                                | __PAGE_KERNEL_RO));
                addr += PAGE_SIZE;
                pte = pte_offset_kernel(pmd, addr);
        }
        return 0;
}
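
/*
 * Fill [addr, end) with zero shadow at PMD granularity: aligned,
 * fully covered chunks reuse the shared kasan_zero_pte; an unaligned
 * tail gets its own page table and falls through to the PTE level.
 */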
static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pmd_t *pmd = pmd_offset(pud, addr);

        while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
                WARN_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
                                | __PAGE_KERNEL_RO));
                addr += PMD_SIZE;
                pmd = pmd_offset(pud, addr);
        }

        if (addr < end) {
                if (pmd_none(*pmd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pte_populate(pmd, addr, end);
        }
        return ret;
}
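
/*
 * Same as zero_pmd_populate(), one level up: aligned PUD-sized chunks
 * share kasan_zero_pmd, the remainder recurses into the PMD level.
 */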
static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pud_t *pud = pud_offset(pgd, addr);

        while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
                WARN_ON(!pud_none(*pud));
                set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
                                | __PAGE_KERNEL_RO));
                addr += PUD_SIZE;
                pud = pud_offset(pgd, addr);
        }

        if (addr < end) {
                if (pud_none(*pud)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pmd_populate(pud, addr, end);
        }
        return ret;
}
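
/*
 * Top level of the zero-shadow walk: aligned PGD-sized chunks share
 * kasan_zero_pud, the remainder recurses into the PUD level.
 */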
static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
{
        int ret = 0;
        pgd_t *pgd = pgd_offset_k(addr);

        while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
                WARN_ON(!pgd_none(*pgd));
                set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
                                | __PAGE_KERNEL_RO));
                addr += PGDIR_SIZE;
                pgd = pgd_offset_k(addr);
        }

        if (addr < end) {
                if (pgd_none(*pgd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pud_populate(pgd, addr, end);
        }
        return ret;
}
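
/*
 * Back [start, end) of the shadow with the shared zero page; failure
 * here is fatal, since KASAN cannot run without a complete shadow.
 */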
static void __init populate_zero_shadow(const void *start, const void *end)
{
        if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
                panic("kasan: unable to map zero shadow!");
}
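
/*
 * With inline instrumentation, a bad shadow access shows up as a general
 * protection fault; hook the die notifier to hint at the likely causes.
 */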
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif
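
/*
 * Replace the early zero shadow with the final layout: real shadow for
 * mapped physical memory and the kernel image, shared zero shadow for
 * everything that is never poisoned.
 */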
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif
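
        /*
         * Run on a copy of the top-level page table while the shadow
         * entries in init_level4_pgt are torn down and rebuilt.
         */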
        memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
        load_cr3(early_level4_pgt);
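
        /*
         * Unmap the early shadow, then install zero shadow for every
         * address below the direct mapping of physical memory.
         */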
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));
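
        /*
         * Allocate real shadow only for physical memory that is actually
         * mapped; pfn_mapped[] is terminated by an entry with .end == 0.
         */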
        for (i = 0; i < E820_X_MAX; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }
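
        /*
         * Zero shadow for everything between the end of the direct
         * mapping and the start of the kernel text mapping.
         */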
        populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                        kasan_mem_to_shadow((void *)__START_KERNEL_map));
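
        /* Real shadow for the kernel image itself. */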
        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);
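
        /*
         * Zero shadow from the end of the modules area up to the end of
         * the shadow region.
         */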
        populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);
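
        /*
         * The early shadow mapping was writable, so instrumented code may
         * have dirtied kasan_zero_page during boot; clear it so that zero
         * shadow really reads as zero.
         */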
        memset(kasan_zero_page, 0, PAGE_SIZE);
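
        /*
         * Switch to the now fully populated init_level4_pgt and enable
         * KASAN reports for the init task (kasan_depth starts out
         * non-zero to suppress reports during early boot).
         */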
        load_cr3(init_level4_pgt);
        init_task.kasan_depth = 0;
}