lkdtm_bugs.c

/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

struct lkdtm_list {
        struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

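/* Consume roughly REC_STACK_SIZE bytes of stack per call until "remaining" reaches zero. */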
static int recursive_loop(int remaining)
{
        char buf[REC_STACK_SIZE];

        /* Make sure compiler does not optimize this away. */
        memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
        if (!remaining)
                return 0;
        else
                return recursive_loop(remaining - 1);
}

/* If the depth is negative, use the default, otherwise keep parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
        if (*recur_param < 0)
                *recur_param = recur_count;
        else
                recur_count = *recur_param;
}

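/* Panic the machine immediately. */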
void lkdtm_PANIC(void)
{
        panic("dumptest");
}

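/* Trigger a BUG(), producing an oops and killing the calling task. */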
void lkdtm_BUG(void)
{
        BUG();
}

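/* Emit a kernel warning and backtrace without killing the task. */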
void lkdtm_WARNING(void)
{
        WARN_ON(1);
}

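/* Write through a NULL pointer to raise a fault. */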
void lkdtm_EXCEPTION(void)
{
        *((volatile int *) 0) = 0;
}

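/* Spin forever in kernel mode without yielding. */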
void lkdtm_LOOP(void)
{
        for (;;)
                ;
}

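/* Recurse deep enough to run off the end of the kernel stack. */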
void lkdtm_OVERFLOW(void)
{
        (void) recursive_loop(recur_count);
}

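/* Write 64 bytes at the given address, smashing the caller's stack frame. */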
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
        memset(stack, 'a', 64);
}

noinline void lkdtm_CORRUPT_STACK(void)
{
        /* Use default char array length that triggers stack protection. */
        char data[8];

        __lkdtm_CORRUPT_STACK(&data);

        pr_info("Corrupted stack with '%16s'...\n", data);
}

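/* Perform a 32-bit load and store through a deliberately misaligned pointer. */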
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
        static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
        u32 *p;
        u32 val = 0x12345678;

        p = (u32 *)(data + 1);
        if (*p == 0)
                val = 0x87654321;
        *p = val;
}

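/* Busy-loop with preemption disabled to trip the soft-lockup detector. */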
void lkdtm_SOFTLOCKUP(void)
{
        preempt_disable();
        for (;;)
                cpu_relax();
}

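/* Busy-loop with local interrupts disabled to trip the hard-lockup detector. */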
void lkdtm_HARDLOCKUP(void)
{
        local_irq_disable();
        for (;;)
                cpu_relax();
}

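/* Take lock_me_up and return while still holding it; a second call deadlocks. */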
void lkdtm_SPINLOCKUP(void)
{
        /* Must be called twice to trigger. */
        spin_lock(&lock_me_up);
        /* Let sparse know we intended to exit holding the lock. */
        __release(&lock_me_up);
}

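/* Sleep uninterruptibly forever so the hung-task detector fires. */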
void lkdtm_HUNG_TASK(void)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule();
}

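/* Corrupt test_head.next before a list_add() so list debugging (CONFIG_DEBUG_LIST) can catch it. */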
void lkdtm_CORRUPT_LIST_ADD(void)
{
        /*
         * Initially, an empty list via LIST_HEAD:
         *      test_head.next = &test_head
         *      test_head.prev = &test_head
         */
        LIST_HEAD(test_head);
        struct lkdtm_list good, bad;
        void *target[2] = { };
        void *redirection = &target;

        pr_info("attempting good list addition\n");

        /*
         * Adding to the list performs these actions:
         *      test_head.next->prev = &good.node
         *      good.node.next = test_head.next
         *      good.node.prev = &test_head
         *      test_head.next = &good.node
         */
        list_add(&good.node, &test_head);

        pr_info("attempting corrupted list addition\n");
        /*
         * In simulating this "write what where" primitive, the "what" is
         * the address &bad.node, and the "where" is the address held
         * by "redirection".
         */
        test_head.next = redirection;
        list_add(&bad.node, &test_head);

        if (target[0] == NULL && target[1] == NULL)
                pr_err("Overwrite did not happen, but no BUG?!\n");
        else
                pr_err("list_add() corruption not detected!\n");
}

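/* As above, but corrupt item.node.next before a list_del() instead. */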
void lkdtm_CORRUPT_LIST_DEL(void)
{
        LIST_HEAD(test_head);
        struct lkdtm_list item;
        void *target[2] = { };
        void *redirection = &target;

        list_add(&item.node, &test_head);

        pr_info("attempting good list removal\n");
        list_del(&item.node);

        pr_info("attempting corrupted list removal\n");
        list_add(&item.node, &test_head);

        /* As with the list_add() test above, this corrupts "next". */
        item.node.next = redirection;
        list_del(&item.node);

        if (target[0] == NULL && target[1] == NULL)
                pr_err("Overwrite did not happen, but no BUG?!\n");
        else
                pr_err("list_del() corruption not detected!\n");
}

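/*
 * Leave the task's address limit set to KERNEL_DS; the kernel should
 * detect this rather than let the task keep running with it.
 */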
void lkdtm_CORRUPT_USER_DS(void)
{
        pr_info("setting bad task size limit\n");
        set_fs(KERNEL_DS);

        /* Make sure we do not keep running with a KERNEL_DS! */
        force_sig(SIGKILL, current);
}