/* drivers/misc/lkdtm_bugs.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * This is for all the tests related to logic bugs (e.g. bad dereferences,
  4. * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
  5. * lockups) along with other things that don't fit well into existing LKDTM
  6. * test source files.
  7. */
  8. #include "lkdtm.h"
  9. #include <linux/list.h>
  10. #include <linux/sched.h>
  11. #include <linux/sched/signal.h>
  12. #include <linux/sched/task_stack.h>
  13. #include <linux/uaccess.h>
/* Minimal list node used by the CORRUPT_LIST_ADD/DEL tests below. */
struct lkdtm_list {
	struct list_head node;
};
/*
 * Make sure our attempts to over run the kernel stack doesn't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
/* Keep each frame at half the warn limit so the build stays quiet. */
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
/* Enough recursions to consume twice the thread stack by default. */
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

/* Recursion depth for OVERFLOW; may be overridden via lkdtm_bugs_init(). */
static int recur_count = REC_NUM_DEFAULT;

/* Lock acquired (and never released) by SPINLOCKUP to create a deadlock. */
static DEFINE_SPINLOCK(lock_me_up);
/*
 * Burn REC_STACK_SIZE bytes of stack per call, recursing "remaining"
 * times, to intentionally overflow the kernel stack (see OVERFLOW).
 */
static int recursive_loop(int remaining)
{
	char buf[REC_STACK_SIZE];

	/* Make sure compiler does not optimize this away. */
	memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}
  40. /* If the depth is negative, use the default, otherwise keep parameter. */
  41. void __init lkdtm_bugs_init(int *recur_param)
  42. {
  43. if (*recur_param < 0)
  44. *recur_param = recur_count;
  45. else
  46. recur_count = *recur_param;
  47. }
/* Immediately panic the machine (e.g. to exercise crash dump paths). */
void lkdtm_PANIC(void)
{
	panic("dumptest");
}
/* Trigger a BUG(), killing the current task with a full oops report. */
void lkdtm_BUG(void)
{
	BUG();
}
/* Counts WARNING invocations so repeated triggers are distinguishable. */
static int warn_counter;

/* Emit a (non-fatal) WARN() with a running trigger count. */
void lkdtm_WARNING(void)
{
	WARN(1, "Warning message trigger count: %d\n", warn_counter++);
}
/*
 * Deliberate NULL pointer write; volatile keeps the compiler from
 * eliding the intentionally-faulting store.
 */
void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}
/* Spin forever without yielding (a plain infinite busy loop). */
void lkdtm_LOOP(void)
{
	for (;;)
		;
}
/* Overflow the kernel stack by recursing recur_count frames deep. */
void lkdtm_OVERFLOW(void)
{
	(void) recursive_loop(recur_count);
}
/*
 * Scribble 64 bytes of 0xff over the caller's buffer; noinline so the
 * write happens in a separate frame and cannot be optimized away.
 */
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}
/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	/* Overrun data[] by writing 64 bytes into an 8-byte buffer. */
	__lkdtm_CORRUPT_STACK(&data);

	pr_info("Corrupted stack containing char array ...\n");
}
/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	/* A union (not a char array) only gets a canary under "strong". */
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	/* Overrun the 8-byte union with a 64-byte write. */
	__lkdtm_CORRUPT_STACK(&data);

	pr_info("Corrupted stack containing union ...\n");
}
/*
 * Perform a deliberately misaligned 32-bit load and store: "data" is
 * 4-byte aligned, so data + 1 is guaranteed unaligned for a u32 access.
 * Faults only on architectures that trap unaligned accesses.
 */
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);		/* misaligned u32 pointer */
	if (*p == 0)			/* unaligned load */
		val = 0x87654321;
	*p = val;			/* unaligned store */
}
/* Spin with preemption disabled so the soft-lockup detector fires. */
void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}
/* Spin with local interrupts disabled so the hard-lockup detector fires. */
void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}
/* Take lock_me_up and return still holding it; deadlocks on re-trigger. */
void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}
/* Block uninterruptibly forever so the hung-task detector fires. */
void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}
/*
 * Corrupt a list head's "next" pointer and then list_add() to it,
 * simulating a "write what where" attack. With CONFIG_DEBUG_LIST the
 * corrupted add should be detected and rejected; otherwise the target[]
 * array gets overwritten and we report the missed detection.
 */
void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = test_head
	 *	test_head.next = good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	/* If target[] stayed NULL, the write was blocked but no BUG fired. */
	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_add() corruption not detected!\n");
}
/*
 * Same idea as CORRUPT_LIST_ADD, but for list_del(): corrupt a node's
 * "next" pointer before deletion and check whether the list-debug
 * machinery catches the bad unlink or the target[] array is clobbered.
 */
void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	/* If target[] stayed NULL, the write was blocked but no BUG fired. */
	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_del() corruption not detected!\n");
}
/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
void lkdtm_CORRUPT_USER_DS(void)
{
	pr_info("setting bad task size limit\n");
	/*
	 * Widen the address limit and return to userspace without
	 * restoring it; the arch's return-path check should catch this.
	 */
	set_fs(KERNEL_DS);

	/* Make sure we do not keep running with a KERNEL_DS! */
	force_sig(SIGKILL, current);
}
/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;	/* one byte below the stack */
	volatile unsigned char byte;		/* volatile: keep the load */

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;	/* should fault on the guard page */

	/* Reached only if the guard page is missing. */
	pr_err("FAIL: accessed page before stack!\n");
}
/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;	/* one past the end */
	volatile unsigned char byte;			/* volatile: keep the load */

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;	/* should fault on the guard page */

	/* Reached only if the guard page is missing. */
	pr_err("FAIL: accessed page after stack!\n");
}