/* lkdtm_bugs.c */
  1. /*
  2. * This is for all the tests related to logic bugs (e.g. bad dereferences,
  3. * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
  4. * lockups) along with other things that don't fit well into existing LKDTM
  5. * test source files.
  6. */
  7. #include "lkdtm.h"
  8. #include <linux/list.h>
  9. #include <linux/sched.h>
/* Stack-allocated list item used by the CORRUPT_LIST_* tests below. */
struct lkdtm_list {
	struct list_head node;
};
/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
/* Default depth: enough frames to run past THREAD_SIZE twice over. */
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
/* Recursion depth used by lkdtm_OVERFLOW(); adjustable via lkdtm_bugs_init(). */
static int recur_count = REC_NUM_DEFAULT;
/* Lock acquired (and intentionally never released) by lkdtm_SPINLOCKUP(). */
static DEFINE_SPINLOCK(lock_me_up);
  26. static int recursive_loop(int remaining)
  27. {
  28. char buf[REC_STACK_SIZE];
  29. /* Make sure compiler does not optimize this away. */
  30. memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
  31. if (!remaining)
  32. return 0;
  33. else
  34. return recursive_loop(remaining - 1);
  35. }
  36. /* If the depth is negative, use the default, otherwise keep parameter. */
  37. void __init lkdtm_bugs_init(int *recur_param)
  38. {
  39. if (*recur_param < 0)
  40. *recur_param = recur_count;
  41. else
  42. recur_count = *recur_param;
  43. }
/* Crash the kernel immediately via panic(). */
void lkdtm_PANIC(void)
{
	panic("dumptest");
}
/* Trigger a BUG() report. */
void lkdtm_BUG(void)
{
	BUG();
}
/* Emit a kernel warning via WARN_ON(). */
void lkdtm_WARNING(void)
{
	WARN_ON(1);
}
/*
 * Write through a NULL pointer to trigger a fault/oops.  The store is
 * written exactly this way on purpose; do not "fix" it.
 */
void lkdtm_EXCEPTION(void)
{
	*((int *) 0) = 0;
}
  60. void lkdtm_LOOP(void)
  61. {
  62. for (;;)
  63. ;
  64. }
/* Exhaust the kernel stack by recursing recur_count frames deep. */
void lkdtm_OVERFLOW(void)
{
	(void) recursive_loop(recur_count);
}
/*
 * Deliberately write 64 bytes into an 8-byte stack buffer, clobbering
 * whatever follows it (intended to trip the stack protector on return;
 * noinline keeps the vulnerable frame intact).
 */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8];
	memset((void *)data, 'a', 64);
	pr_info("Corrupted stack with '%16s'...\n", data);
}
/*
 * Perform a 32-bit load and store through a pointer that is misaligned
 * by one byte from a 4-byte-aligned buffer; may fault on architectures
 * that trap unaligned accesses.
 */
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;
	/* data is 4-byte aligned, so data + 1 is deliberately misaligned. */
	p = (u32 *)(data + 1);
	/* Misaligned load... */
	if (*p == 0)
		val = 0x87654321;
	/* ...and misaligned store. */
	*p = val;
}
/* Busy-loop with preemption disabled to trigger the soft-lockup detector. */
void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}
/* Busy-loop with local IRQs disabled to trigger the hard-lockup detector. */
void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}
/*
 * Take a spinlock and return while still holding it; a second call then
 * deadlocks on the already-held lock.
 */
void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}
/*
 * Put the current task into uninterruptible sleep with nothing set up
 * to wake it, to trigger the hung-task detector.
 */
void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}
  110. void lkdtm_ATOMIC_UNDERFLOW(void)
  111. {
  112. atomic_t under = ATOMIC_INIT(INT_MIN);
  113. pr_info("attempting good atomic increment\n");
  114. atomic_inc(&under);
  115. atomic_dec(&under);
  116. pr_info("attempting bad atomic underflow\n");
  117. atomic_dec(&under);
  118. }
  119. void lkdtm_ATOMIC_OVERFLOW(void)
  120. {
  121. atomic_t over = ATOMIC_INIT(INT_MAX);
  122. pr_info("attempting good atomic decrement\n");
  123. atomic_dec(&over);
  124. atomic_inc(&over);
  125. pr_info("attempting bad atomic overflow\n");
  126. atomic_inc(&over);
  127. }
/*
 * Corrupt test_head.next so that the pointer writes performed by
 * list_add() land in the local target[] array instead of a list node,
 * then check whether the write actually happened.  Intended to be
 * caught by the kernel's list-corruption checking — NOTE(review):
 * presumably CONFIG_DEBUG_LIST; confirm against the config.
 */
void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_add() corruption not detected!\n");
}
/*
 * Corrupt a list node's next pointer so that the pointer writes
 * performed by list_del() land in the local target[] array, then check
 * whether the write actually happened.  Intended to be caught by the
 * kernel's list-corruption checking — NOTE(review): presumably
 * CONFIG_DEBUG_LIST; confirm against the config.
 */
void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_del() corruption not detected!\n");
}