page_poison.c

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
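
/*
 * Page poisoning: freed pages are filled with the PAGE_POISON pattern,
 * and the pattern is checked again when the pages are handed back out,
 * so writes to freed memory show up as corruption reports.
 */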
static bool __page_poisoning_enabled __read_mostly;
static bool want_page_poisoning __read_mostly;
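
/* Parse the "page_poison=" early boot parameter. */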
static int early_page_poison_param(char *buf)
{
	if (!buf)
		return -EINVAL;
	return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);

bool page_poisoning_enabled(void)
{
	return __page_poisoning_enabled;
}

static bool need_page_poisoning(void)
{
	return want_page_poisoning;
}

static void init_page_poisoning(void)
{
	/*
	 * page poisoning is debug page alloc for some arches. If either
	 * of those options are enabled, enable poisoning
	 */
	if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
		if (!want_page_poisoning && !debug_pagealloc_enabled())
			return;
	} else {
		if (!want_page_poisoning)
			return;
	}

	__page_poisoning_enabled = true;
}

struct page_ext_operations page_poisoning_ops = {
	.need = need_page_poisoning,
	.init = init_page_poisoning,
};
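
/*
 * The poisoned state of a page is tracked with the PAGE_EXT_DEBUG_POISON
 * bit in the page's page_ext flags.
 */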
static inline void set_page_poison(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}

static inline void clear_page_poison(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}

bool page_is_poisoned(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	if (!page_ext)
		return false;

	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}
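
/* Mark the page as poisoned and fill it with the PAGE_POISON pattern. */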
static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	set_page_poison(page);
	memset(addr, PAGE_POISON, PAGE_SIZE);
	kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}
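
/*
 * a ^ b leaves only the differing bits set; a non-zero result that is a
 * power of two means exactly one bit flipped.
 */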
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}
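
/*
 * Verify that the buffer still holds the poison pattern; if not, report
 * the corrupted range (rate-limited) with a hex dump and a stack trace.
 */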
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}
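
/*
 * Verify the poison pattern and clear the poisoned state when the page
 * is handed back out.
 */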
static void unpoison_page(struct page *page)
{
	void *addr;

	if (!page_is_poisoned(page))
		return;

	addr = kmap_atomic(page);
	check_poison_mem(addr, PAGE_SIZE);
	clear_page_poison(page);
	kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}
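
/*
 * Entry point used by the page allocator: enable != 0 checks and clears
 * the poison on allocation, enable == 0 poisons the pages on free.
 */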
void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled())
		return;

	if (enable)
		unpoison_pages(page, numpages);
	else
		poison_pages(page, numpages);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif