/* drivers/misc/lkdtm_bugs.c */
  1. /*
  2. * This is for all the tests related to logic bugs (e.g. bad dereferences,
  3. * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
  4. * lockups) along with other things that don't fit well into existing LKDTM
  5. * test source files.
  6. */
  7. #include "lkdtm.h"
  8. #include <linux/list.h>
  9. #include <linux/sched.h>
  10. #include <linux/sched/signal.h>
  11. #include <linux/sched/task_stack.h>
  12. #include <linux/uaccess.h>
/* Minimal list node used by the CORRUPT_LIST_ADD/DEL tests below. */
struct lkdtm_list {
	struct list_head node;
};
/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
/* Enough frames to recurse roughly 2x THREAD_SIZE past the stack end. */
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

/* Recursion depth used by lkdtm_OVERFLOW(); settable via lkdtm_bugs_init(). */
static int recur_count = REC_NUM_DEFAULT;

/* Lock taken and deliberately leaked by lkdtm_SPINLOCKUP(). */
static DEFINE_SPINLOCK(lock_me_up);
  29. static int recursive_loop(int remaining)
  30. {
  31. char buf[REC_STACK_SIZE];
  32. /* Make sure compiler does not optimize this away. */
  33. memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
  34. if (!remaining)
  35. return 0;
  36. else
  37. return recursive_loop(remaining - 1);
  38. }
  39. /* If the depth is negative, use the default, otherwise keep parameter. */
  40. void __init lkdtm_bugs_init(int *recur_param)
  41. {
  42. if (*recur_param < 0)
  43. *recur_param = recur_count;
  44. else
  45. recur_count = *recur_param;
  46. }
/* Trigger an immediate kernel panic. */
void lkdtm_PANIC(void)
{
	panic("dumptest");
}
/* Hit a BUG() directly. */
void lkdtm_BUG(void)
{
	BUG();
}
/* Emit a single WARN_ON() backtrace. */
void lkdtm_WARNING(void)
{
	WARN_ON(1);
}
/* Store through a NULL pointer to raise a fault. */
void lkdtm_EXCEPTION(void)
{
	/* volatile so the compiler cannot remove the bad store. */
	*((volatile int *) 0) = 0;
}
  63. void lkdtm_LOOP(void)
  64. {
  65. for (;;)
  66. ;
  67. }
/* Recurse recur_count frames deep to run off the end of the stack. */
void lkdtm_OVERFLOW(void)
{
	(void) recursive_loop(recur_count);
}
/*
 * Helper: scribble 0xff over 64 bytes starting at the given stack
 * address. noinline so the overflowing write stays in its own frame
 * rather than being folded into the caller.
 */
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}
/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	/* Write 64 bytes into the 8-byte buffer: past data[] into the canary. */
	__lkdtm_CORRUPT_STACK(&data);

	pr_info("Corrupted stack containing char array ...\n");
}
/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	/* A union with no char array only gets a canary under "strong" mode. */
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	/* Overflow the union with 64 bytes of 0xff. */
	__lkdtm_CORRUPT_STACK(&data);

	pr_info("Corrupted stack containing union ...\n");
}
  94. void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
  95. {
  96. static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
  97. u32 *p;
  98. u32 val = 0x12345678;
  99. p = (u32 *)(data + 1);
  100. if (*p == 0)
  101. val = 0x87654321;
  102. *p = val;
  103. }
/* Spin forever with preemption disabled (soft-lockup condition). */
void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}
/* Spin forever with local interrupts disabled (hard-lockup condition). */
void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}
/* Take lock_me_up and return without releasing it. */
void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}
/* Sleep uninterruptibly forever (hung-task condition). */
void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}
/*
 * Corrupt test_head.next before a list_add() and report whether the
 * resulting "write what where" landed in target[] -- if it did, the
 * kernel's list_add() corruption checking failed to stop it.
 */
void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = test_head
	 *	test_head.next = good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	/* Either write was blocked (BUG expected) or detection failed. */
	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_add() corruption not detected!\n");
}
/*
 * Corrupt item.node.next before a list_del() and report whether the
 * stray write hit target[] -- if it did, the kernel's list_del()
 * corruption checking failed to stop it.
 */
void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	/* Either write was blocked (BUG expected) or detection failed. */
	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_del() corruption not detected!\n");
}
/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
void lkdtm_CORRUPT_USER_DS(void)
{
	pr_info("setting bad task size limit\n");
	set_fs(KERNEL_DS);

	/* Make sure we do not keep running with a KERNEL_DS! */
	force_sig(SIGKILL, current);
}
  188. /* Test that VMAP_STACK is actually allocating with a leading guard page */
  189. void lkdtm_STACK_GUARD_PAGE_LEADING(void)
  190. {
  191. const unsigned char *stack = task_stack_page(current);
  192. const unsigned char *ptr = stack - 1;
  193. volatile unsigned char byte;
  194. pr_info("attempting bad read from page below current stack\n");
  195. byte = *ptr;
  196. pr_err("FAIL: accessed page before stack!\n");
  197. }
  198. /* Test that VMAP_STACK is actually allocating with a trailing guard page */
  199. void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
  200. {
  201. const unsigned char *stack = task_stack_page(current);
  202. const unsigned char *ptr = stack + THREAD_SIZE;
  203. volatile unsigned char byte;
  204. pr_info("attempting bad read from page above current stack\n");
  205. byte = *ptr;
  206. pr_err("FAIL: accessed page after stack!\n");
  207. }