kasan_init.c

/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

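/*
 * Temporary pgdir used by kasan_init() to keep the early shadow mapped
 * while the real shadow mappings are being installed in swapper_pg_dir.
 */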
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

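/*
 * Allocate a zeroed page from memblock for late shadow pages and shadow
 * page tables, returning its physical address.
 */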
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS),
					      MEMBLOCK_ALLOC_ACCESSIBLE, node);
	return __pa(p);
}

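/*
 * kasan_pte_offset/kasan_pmd_offset/kasan_pud_offset return the shadow table
 * entry for @addr at the next level down, populating the missing table first
 * if the current entry is empty. In the early case the statically allocated
 * kasan_zero_p* tables are installed (via __pa_symbol); otherwise a freshly
 * zeroed page is allocated from memblock.
 */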
static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(*pmd)) {
		phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
					     : kasan_alloc_zeroed_page(node);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmd, addr)
		     : pte_offset_kernel(pmd, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(*pud)) {
		phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
					     : kasan_alloc_zeroed_page(node);
		__pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
}

static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
					     : kasan_alloc_zeroed_page(node);
		__pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
}

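/*
 * The kasan_p*_populate functions walk the shadow page tables for
 * [addr, end) one level at a time. At the pte level, each shadow page is
 * mapped either to the shared kasan_zero_page (early) or to a newly
 * allocated zeroed page (late).
 */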
static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *pte = kasan_pte_offset(pmd, addr, node, early);

	do {
		phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
					      : kasan_alloc_zeroed_page(node);
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}

static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmd, addr, next, node, early);
	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}

static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pud = kasan_pud_offset(pgd, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pud, addr, next, node, early);
	} while (pud++, addr = next, addr != end && pud_none(*pud));
}

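/*
 * Top of the walk: populate shadow page tables for [addr, end), descending
 * through the pud/pmd/pte levels above. Used both by kasan_early_init()
 * (early == true) and by the full setup in kasan_init() (early == false).
 */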
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_pud_populate(pgd, addr, next, node, early);
	} while (pgd++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgd, *pgd_new, *pgd_end;

	pgd = pgd_offset_k(KASAN_SHADOW_START);
	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgd_new, *pgd);
	} while (pgd++, pgd_new++, pgd != pgd_end);
}

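/* Unmap the [start, end) shadow range from swapper_pg_dir, one pgd at a time. */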
static void __init clear_pgds(unsigned long start,
			      unsigned long end)
{
	/*
	 * Remove references to kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a no-op on 2- and 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

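/*
 * Replace the early zero shadow with real shadow memory. Each byte of shadow
 * covers eight bytes of kernel address space: kasan_mem_to_shadow(addr) is
 * (addr >> 3) + KASAN_SHADOW_OFFSET. The kernel image and all memblock memory
 * get backing shadow pages; the remaining shadow ranges are mapped to the
 * zero page, which is made read-only at the end.
 */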
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we unmap the early shadow (see the clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory, so
	 * tmp_pg_dir is used to keep the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}