lkdtm_bugs.c

/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/sched.h>

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

static int recursive_loop(int remaining)
{
	char buf[REC_STACK_SIZE];

	/* Make sure compiler does not optimize this away. */
	memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}

/* If the depth is negative, use the default; otherwise, use the parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}
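
/* Panic the kernel outright. */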
void lkdtm_PANIC(void)
{
	panic("dumptest");
}

void lkdtm_BUG(void)
{
	BUG();
}

void lkdtm_WARNING(void)
{
	WARN_ON(1);
}
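
/* Trigger an exception by writing through a NULL pointer. */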
void lkdtm_EXCEPTION(void)
{
	*((int *) 0) = 0;
}
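
/* Spin in a tight, unbreakable loop. */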
void lkdtm_LOOP(void)
{
	for (;;)
		;
}
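
/* Exhaust the kernel stack by recursing past THREAD_SIZE. */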
void lkdtm_OVERFLOW(void)
{
	(void) recursive_loop(recur_count);
}

noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8];

	memset((void *)data, 'a', 64);
	pr_info("Corrupted stack with '%16s'...\n", data);
}
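
/* Write a 32-bit value through a deliberately misaligned pointer. */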
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;
}
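
/* Busy-loop with preemption disabled to trip the soft-lockup watchdog. */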
void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}
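
/* Busy-loop with local interrupts disabled to trip the hard-lockup watchdog. */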
void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}
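
/* Sleep uninterruptibly without a wakeup, so the hung-task detector fires. */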
void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}
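
/*
 * Wrap an atomic_t below INT_MIN; atomic wrap-around protection,
 * if enabled, should catch this.
 */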
void lkdtm_ATOMIC_UNDERFLOW(void)
{
	atomic_t under = ATOMIC_INIT(INT_MIN);

	pr_info("attempting good atomic increment\n");
	atomic_inc(&under);
	atomic_dec(&under);

	pr_info("attempting bad atomic underflow\n");
	atomic_dec(&under);
}
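
/*
 * Wrap an atomic_t above INT_MAX; atomic wrap-around protection,
 * if enabled, should catch this.
 */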
void lkdtm_ATOMIC_OVERFLOW(void)
{
	atomic_t over = ATOMIC_INIT(INT_MAX);

	pr_info("attempting good atomic decrement\n");
	atomic_dec(&over);
	atomic_inc(&over);

	pr_info("attempting bad atomic overflow\n");
	atomic_inc(&over);
}