/*
 * Linux Kernel Dump Test Module for testing kernel crash conditions:
 * induces system failures at predefined crash points and under predefined
 * operational conditions in order to evaluate the reliability of kernel
 * sanity checking and crash dumps obtained using different dumping
 * solutions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Ankita Garg <ankita@in.ibm.com>
 *
 * It is adapted from the Linux Kernel Dump Test Tool by
 * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
 *
 * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
 *
 * See Documentation/fault-injection/provoke-crashes.txt for instructions
 */
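/*
 * Example usage (a sketch, not from this file; paths assume debugfs is
 * mounted at the usual /sys/kernel/debug, and the crash point and type
 * names are the ones defined in cp_name[]/cp_type[] below):
 *
 *   # trigger a BUG() immediately through the DIRECT debugfs entry
 *   echo BUG > /sys/kernel/debug/provoke-crash/DIRECT
 *
 *   # or arm a crash point when loading the module
 *   modprobe lkdtm cpoint_name=INT_HARDWARE_ENTRY cpoint_type=PANIC \
 *           cpoint_count=10
 */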
#define pr_fmt(fmt) "lkdtm: " fmt

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>	/* copy_to_user()/copy_from_user() */
#include <asm/cacheflush.h>

#ifdef CONFIG_IDE
#include <linux/ide.h>
#endif

#include "lkdtm.h"
/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
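/*
 * Illustrative arithmetic only (actual values depend on the build): with
 * CONFIG_FRAME_WARN=1024, REC_STACK_SIZE is 512 bytes, and with a 16 KB
 * THREAD_SIZE the default depth is (16384 / 512) * 2 = 64 calls, i.e.
 * roughly twice THREAD_SIZE worth of stack.
 */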
#define DEFAULT_COUNT 10
#define EXEC_SIZE 64

enum cname {
	CN_INVALID,
	CN_INT_HARDWARE_ENTRY,
	CN_INT_HW_IRQ_EN,
	CN_INT_TASKLET_ENTRY,
	CN_FS_DEVRW,
	CN_MEM_SWAPOUT,
	CN_TIMERADD,
	CN_SCSI_DISPATCH_CMD,
	CN_IDE_CORE_CP,
	CN_DIRECT,
};

enum ctype {
	CT_NONE,
	CT_PANIC,
	CT_BUG,
	CT_WARNING,
	CT_EXCEPTION,
	CT_LOOP,
	CT_OVERFLOW,
	CT_CORRUPT_STACK,
	CT_UNALIGNED_LOAD_STORE_WRITE,
	CT_OVERWRITE_ALLOCATION,
	CT_WRITE_AFTER_FREE,
	CT_READ_AFTER_FREE,
	CT_WRITE_BUDDY_AFTER_FREE,
	CT_READ_BUDDY_AFTER_FREE,
	CT_SOFTLOCKUP,
	CT_HARDLOCKUP,
	CT_SPINLOCKUP,
	CT_HUNG_TASK,
	CT_EXEC_DATA,
	CT_EXEC_STACK,
	CT_EXEC_KMALLOC,
	CT_EXEC_VMALLOC,
	CT_EXEC_RODATA,
	CT_EXEC_USERSPACE,
	CT_ACCESS_USERSPACE,
	CT_WRITE_RO,
	CT_WRITE_RO_AFTER_INIT,
	CT_WRITE_KERN,
	CT_WRAP_ATOMIC
};
static char* cp_name[] = {
	"INT_HARDWARE_ENTRY",
	"INT_HW_IRQ_EN",
	"INT_TASKLET_ENTRY",
	"FS_DEVRW",
	"MEM_SWAPOUT",
	"TIMERADD",
	"SCSI_DISPATCH_CMD",
	"IDE_CORE_CP",
	"DIRECT",
};

static char* cp_type[] = {
	"PANIC",
	"BUG",
	"WARNING",
	"EXCEPTION",
	"LOOP",
	"OVERFLOW",
	"CORRUPT_STACK",
	"UNALIGNED_LOAD_STORE_WRITE",
	"OVERWRITE_ALLOCATION",
	"WRITE_AFTER_FREE",
	"READ_AFTER_FREE",
	"WRITE_BUDDY_AFTER_FREE",
	"READ_BUDDY_AFTER_FREE",
	"SOFTLOCKUP",
	"HARDLOCKUP",
	"SPINLOCKUP",
	"HUNG_TASK",
	"EXEC_DATA",
	"EXEC_STACK",
	"EXEC_KMALLOC",
	"EXEC_VMALLOC",
	"EXEC_RODATA",
	"EXEC_USERSPACE",
	"ACCESS_USERSPACE",
	"WRITE_RO",
	"WRITE_RO_AFTER_INIT",
	"WRITE_KERN",
	"WRAP_ATOMIC"
};
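/*
 * Note: cp_name[] and cp_type[] must stay in the same order as enum cname
 * and enum ctype above (minus CN_INVALID/CT_NONE), since the lookup code
 * below maps array index i to enum value i + 1.
 */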
static struct jprobe lkdtm;

static int lkdtm_parse_commandline(void);
static void lkdtm_handler(void);

static char* cpoint_name;
static char* cpoint_type;
static int cpoint_count = DEFAULT_COUNT;
static int recur_count = REC_NUM_DEFAULT;

static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(count_lock);
static DEFINE_SPINLOCK(lock_me_up);

static u8 data_area[EXEC_SIZE];

static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;

module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "
		"hitting the crash point");
module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "
		"crash point is to be hit to trigger action");
static unsigned int jp_do_irq(unsigned int irq)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static irqreturn_t jp_handle_irq_event(unsigned int irq,
				       struct irqaction *action)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static void jp_tasklet_action(struct softirq_action *a)
{
	lkdtm_handler();
	jprobe_return();
}

static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	lkdtm_handler();
	jprobe_return();
}

struct scan_control;

static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
					     struct zone *zone,
					     struct scan_control *sc)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
			    const enum hrtimer_mode mode)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

#ifdef CONFIG_IDE
static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
				struct block_device *bdev, unsigned int cmd,
				unsigned long arg)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}
#endif
/* Return the crash type or CT_NONE if the name is invalid */
static enum ctype parse_cp_type(const char *what, size_t count)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
		if (!strcmp(what, cp_type[i]))
			return i + 1;
	}

	return CT_NONE;
}

static const char *cp_type_to_str(enum ctype type)
{
	if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
		return "None";

	return cp_type[type - 1];
}

static const char *cp_name_to_str(enum cname name)
{
	if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
		return "INVALID";

	return cp_name[name - 1];
}
static int lkdtm_parse_commandline(void)
{
	int i;
	unsigned long flags;

	if (cpoint_count < 1 || recur_count < 1)
		return -EINVAL;

	spin_lock_irqsave(&count_lock, flags);
	count = cpoint_count;
	spin_unlock_irqrestore(&count_lock, flags);

	/* No special parameters */
	if (!cpoint_type && !cpoint_name)
		return 0;

	/* Neither or both of these need to be set */
	if (!cpoint_type || !cpoint_name)
		return -EINVAL;

	cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
	if (cptype == CT_NONE)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
		if (!strcmp(cpoint_name, cp_name[i])) {
			cpoint = i + 1;
			return 0;
		}
	}

	/* Could not find a valid crash point */
	return -EINVAL;
}

static int recursive_loop(int remaining)
{
	char buf[REC_STACK_SIZE];

	/* Make sure compiler does not optimize this away. */
	memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}
static void do_nothing(void)
{
	return;
}

/* Must immediately follow do_nothing for size calculations to work out. */
static void do_overwritten(void)
{
	pr_info("do_overwritten wasn't overwritten!\n");
	return;
}

static noinline void corrupt_stack(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8];

	memset((void *)data, 0, 64);
}
static noinline void execute_location(void *dst, bool write)
{
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	if (write) {
		memcpy(dst, do_nothing, EXEC_SIZE);
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + EXEC_SIZE);
	}
	pr_info("attempting bad execution at %p\n", func);
	func();
}

static void execute_user_location(void *dst)
{
	/* Intentionally crossing kernel/user memory boundary. */
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
		return;
	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
	pr_info("attempting bad execution at %p\n", func);
	func();
}
static void lkdtm_do_action(enum ctype which)
{
	switch (which) {
	case CT_PANIC:
		panic("dumptest");
		break;
	case CT_BUG:
		BUG();
		break;
	case CT_WARNING:
		WARN_ON(1);
		break;
	case CT_EXCEPTION:
		*((int *) 0) = 0;
		break;
	case CT_LOOP:
		for (;;)
			;
		break;
	case CT_OVERFLOW:
		(void) recursive_loop(recur_count);
		break;
	case CT_CORRUPT_STACK:
		corrupt_stack();
		break;
	case CT_UNALIGNED_LOAD_STORE_WRITE: {
		static u8 data[5] __attribute__((aligned(4))) = {1, 2,
								 3, 4, 5};
		u32 *p;
		u32 val = 0x12345678;

		p = (u32 *)(data + 1);
		if (*p == 0)
			val = 0x87654321;
		*p = val;
		break;
	}
	case CT_OVERWRITE_ALLOCATION: {
		size_t len = 1020;
		u32 *data = kmalloc(len, GFP_KERNEL);

		data[1024 / sizeof(u32)] = 0x12345678;
		kfree(data);
		break;
	}
	case CT_WRITE_AFTER_FREE: {
		int *base, *again;
		size_t len = 1024;
		/*
		 * The slub allocator uses the first word to store the free
		 * pointer in some configurations. Use the middle of the
		 * allocation to avoid running into the freelist
		 */
		size_t offset = (len / sizeof(*base)) / 2;

		base = kmalloc(len, GFP_KERNEL);
		pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
		pr_info("Attempting bad write to freed memory at %p\n",
			&base[offset]);
		kfree(base);
		base[offset] = 0x0abcdef0;
		/* Attempt to notice the overwrite. */
		again = kmalloc(len, GFP_KERNEL);
		kfree(again);
		if (again != base)
			pr_info("Hmm, didn't get the same memory range.\n");
		break;
	}
	case CT_READ_AFTER_FREE: {
		int *base, *val, saw;
		size_t len = 1024;
		/*
		 * The slub allocator uses the first word to store the free
		 * pointer in some configurations. Use the middle of the
		 * allocation to avoid running into the freelist
		 */
		size_t offset = (len / sizeof(*base)) / 2;

		base = kmalloc(len, GFP_KERNEL);
		if (!base)
			break;

		val = kmalloc(len, GFP_KERNEL);
		if (!val) {
			kfree(base);
			break;
		}

		*val = 0x12345678;
		base[offset] = *val;
		pr_info("Value in memory before free: %x\n", base[offset]);

		kfree(base);

		pr_info("Attempting bad read from freed memory\n");
		saw = base[offset];
		if (saw != *val) {
			/* Good! Poisoning happened, so declare a win. */
			pr_info("Memory correctly poisoned (%x)\n", saw);
			BUG();
		}
		pr_info("Memory was not poisoned\n");

		kfree(val);
		break;
	}
	case CT_WRITE_BUDDY_AFTER_FREE: {
		unsigned long p = __get_free_page(GFP_KERNEL);

		if (!p)
			break;
		pr_info("Writing to the buddy page before free\n");
		memset((void *)p, 0x3, PAGE_SIZE);
		free_page(p);
		schedule();
		pr_info("Attempting bad write to the buddy page after free\n");
		memset((void *)p, 0x78, PAGE_SIZE);
		/* Attempt to notice the overwrite. */
		p = __get_free_page(GFP_KERNEL);
		free_page(p);
		schedule();
		break;
	}
	case CT_READ_BUDDY_AFTER_FREE: {
		unsigned long p = __get_free_page(GFP_KERNEL);
		int saw, *val;
		int *base;

		if (!p)
			break;

		val = kmalloc(1024, GFP_KERNEL);
		if (!val) {
			free_page(p);
			break;
		}

		base = (int *)p;

		*val = 0x12345678;
		base[0] = *val;
		pr_info("Value in memory before free: %x\n", base[0]);
		free_page(p);

		pr_info("Attempting to read from freed memory\n");
		saw = base[0];
		if (saw != *val) {
			/* Good! Poisoning happened, so declare a win. */
			pr_info("Memory correctly poisoned (%x)\n", saw);
			BUG();
		}
		pr_info("Buddy page was not poisoned\n");

		kfree(val);
		break;
	}
	case CT_SOFTLOCKUP:
		preempt_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_HARDLOCKUP:
		local_irq_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_SPINLOCKUP:
		/* Must be called twice to trigger. */
		spin_lock(&lock_me_up);
		/* Let sparse know we intended to exit holding the lock. */
		__release(&lock_me_up);
		break;
	case CT_HUNG_TASK:
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
		break;
	case CT_EXEC_DATA:
		execute_location(data_area, true);
		break;
	case CT_EXEC_STACK: {
		u8 stack_area[EXEC_SIZE];

		execute_location(stack_area, true);
		break;
	}
	case CT_EXEC_KMALLOC: {
		u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);

		execute_location(kmalloc_area, true);
		kfree(kmalloc_area);
		break;
	}
	case CT_EXEC_VMALLOC: {
		u32 *vmalloc_area = vmalloc(EXEC_SIZE);

		execute_location(vmalloc_area, true);
		vfree(vmalloc_area);
		break;
	}
	case CT_EXEC_RODATA:
		execute_location(lkdtm_rodata_do_nothing, false);
		break;
	case CT_EXEC_USERSPACE: {
		unsigned long user_addr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}
		execute_user_location((void *)user_addr);
		vm_munmap(user_addr, PAGE_SIZE);
		break;
	}
	case CT_ACCESS_USERSPACE: {
		unsigned long user_addr, tmp = 0;
		unsigned long *ptr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}

		if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
			pr_warn("copy_to_user failed\n");
			vm_munmap(user_addr, PAGE_SIZE);
			return;
		}

		ptr = (unsigned long *)user_addr;

		pr_info("attempting bad read at %p\n", ptr);
		tmp = *ptr;
		tmp += 0xc0dec0de;

		pr_info("attempting bad write at %p\n", ptr);
		*ptr = tmp;

		vm_munmap(user_addr, PAGE_SIZE);

		break;
	}
	case CT_WRITE_RO: {
		/* Explicitly cast away "const" for the test. */
		unsigned long *ptr = (unsigned long *)&rodata;

		pr_info("attempting bad rodata write at %p\n", ptr);
		*ptr ^= 0xabcd1234;

		break;
	}
	case CT_WRITE_RO_AFTER_INIT: {
		unsigned long *ptr = &ro_after_init;

		/*
		 * Verify we were written to during init. Since an Oops
		 * is considered a "success", a failure is to just skip the
		 * real test.
		 */
		if ((*ptr & 0xAA) != 0xAA) {
			pr_info("%p was NOT written during init!?\n", ptr);
			break;
		}

		pr_info("attempting bad ro_after_init write at %p\n", ptr);
		*ptr ^= 0xabcd1234;

		break;
	}
	case CT_WRITE_KERN: {
		size_t size;
		unsigned char *ptr;

		size = (unsigned long)do_overwritten -
		       (unsigned long)do_nothing;
		ptr = (unsigned char *)do_overwritten;

		pr_info("attempting bad %zu byte write at %p\n", size, ptr);
		memcpy(ptr, (unsigned char *)do_nothing, size);
		flush_icache_range((unsigned long)ptr,
				   (unsigned long)(ptr + size));

		do_overwritten();
		break;
	}
	case CT_WRAP_ATOMIC: {
		atomic_t under = ATOMIC_INIT(INT_MIN);
		atomic_t over = ATOMIC_INIT(INT_MAX);

		pr_info("attempting atomic underflow\n");
		atomic_dec(&under);

		pr_info("attempting atomic overflow\n");
		atomic_inc(&over);

		return;
	}
	case CT_NONE:
	default:
		break;
	}
}
static void lkdtm_handler(void)
{
	unsigned long flags;
	bool do_it = false;

	spin_lock_irqsave(&count_lock, flags);
	count--;
	pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
		cp_name_to_str(cpoint), cp_type_to_str(cptype), count);

	if (count == 0) {
		do_it = true;
		count = cpoint_count;
	}
	spin_unlock_irqrestore(&count_lock, flags);

	if (do_it)
		lkdtm_do_action(cptype);
}

static int lkdtm_register_cpoint(enum cname which)
{
	int ret;

	cpoint = CN_INVALID;
	if (lkdtm.entry != NULL)
		unregister_jprobe(&lkdtm);

	switch (which) {
	case CN_DIRECT:
		lkdtm_do_action(cptype);
		return 0;
	case CN_INT_HARDWARE_ENTRY:
		lkdtm.kp.symbol_name = "do_IRQ";
		lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
		break;
	case CN_INT_HW_IRQ_EN:
		lkdtm.kp.symbol_name = "handle_IRQ_event";
		lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
		break;
	case CN_INT_TASKLET_ENTRY:
		lkdtm.kp.symbol_name = "tasklet_action";
		lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
		break;
	case CN_FS_DEVRW:
		lkdtm.kp.symbol_name = "ll_rw_block";
		lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
		break;
	case CN_MEM_SWAPOUT:
		lkdtm.kp.symbol_name = "shrink_inactive_list";
		lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
		break;
	case CN_TIMERADD:
		lkdtm.kp.symbol_name = "hrtimer_start";
		lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
		break;
	case CN_SCSI_DISPATCH_CMD:
		lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
		lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
		break;
	case CN_IDE_CORE_CP:
#ifdef CONFIG_IDE
		lkdtm.kp.symbol_name = "generic_ide_ioctl";
		lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
#else
		pr_info("Crash point not available\n");
		return -EINVAL;
#endif
		break;
	default:
		pr_info("Invalid Crash Point\n");
		return -EINVAL;
	}

	cpoint = which;
	if ((ret = register_jprobe(&lkdtm)) < 0) {
		pr_info("Couldn't register jprobe\n");
		cpoint = CN_INVALID;
	}

	return ret;
}
static ssize_t do_register_entry(enum cname which, struct file *f,
		const char __user *user_buf, size_t count, loff_t *off)
{
	char *buf;
	int err;

	if (count >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, user_buf, count)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	/* NUL-terminate and strip the trailing newline */
	buf[count] = '\0';
	strim(buf);

	cptype = parse_cp_type(buf, count);
	free_page((unsigned long) buf);

	if (cptype == CT_NONE)
		return -EINVAL;

	err = lkdtm_register_cpoint(which);
	if (err < 0)
		return err;

	*off += count;

	return count;
}
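/*
 * For example (a sketch, assuming debugfs is mounted at /sys/kernel/debug):
 *   echo EXCEPTION > /sys/kernel/debug/provoke-crash/INT_HARDWARE_ENTRY
 * arms the INT_HARDWARE_ENTRY crash point so that, once the probed function
 * has been hit cpoint_count times (10 by default), lkdtm_handler() triggers
 * the NULL dereference of CT_EXCEPTION.
 */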
/* Generic read callback that just prints out the available crash types */
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
		size_t count, loff_t *off)
{
	char *buf;
	int i, n, out;

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
	for (i = 0; i < ARRAY_SIZE(cp_type); i++)
		n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
	buf[n] = '\0';

	out = simple_read_from_buffer(user_buf, count, off,
				      buf, n);
	free_page((unsigned long) buf);

	return out;
}

static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
{
	return 0;
}
static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
}

static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
}

static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
}

static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
}

static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
}

static ssize_t timeradd_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_TIMERADD, f, buf, count, off);
}

static ssize_t scsi_dispatch_cmd_entry(struct file *f,
		const char __user *buf, size_t count, loff_t *off)
{
	return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
}

static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
}
/* Special entry to just crash directly. Available without KPROBEs */
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
		size_t count, loff_t *off)
{
	enum ctype type;
	char *buf;

	if (count >= PAGE_SIZE)
		return -EINVAL;
	if (count < 1)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, user_buf, count)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	/* NUL-terminate and strip the trailing newline */
	buf[count] = '\0';
	strim(buf);

	type = parse_cp_type(buf, count);
	free_page((unsigned long) buf);
	if (type == CT_NONE)
		return -EINVAL;

	pr_info("Performing direct entry %s\n", cp_type_to_str(type));
	lkdtm_do_action(type);
	*off += count;

	return count;
}
struct crash_entry {
	const char *name;
	const struct file_operations fops;
};

static const struct crash_entry crash_entries[] = {
	{"DIRECT", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = direct_entry} },
	{"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_hardware_entry} },
	{"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_hw_irq_en} },
	{"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_tasklet_entry} },
	{"FS_DEVRW", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = fs_devrw_entry} },
	{"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = mem_swapout_entry} },
	{"TIMERADD", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = timeradd_entry} },
	{"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = scsi_dispatch_cmd_entry} },
	{"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = ide_core_cp_entry} },
};
static struct dentry *lkdtm_debugfs_root;

static int __init lkdtm_module_init(void)
{
	int ret = -EINVAL;
	int n_debugfs_entries = 1; /* Assume only the direct entry */
	int i;

	/* Make sure we can write to __ro_after_init values during __init */
	ro_after_init |= 0xAA;

	/* Register debugfs interface */
	lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
	if (!lkdtm_debugfs_root) {
		pr_err("creating root dir failed\n");
		return -ENODEV;
	}

#ifdef CONFIG_KPROBES
	n_debugfs_entries = ARRAY_SIZE(crash_entries);
#endif

	for (i = 0; i < n_debugfs_entries; i++) {
		const struct crash_entry *cur = &crash_entries[i];
		struct dentry *de;

		de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
					 NULL, &cur->fops);
		if (de == NULL) {
			pr_err("could not create %s\n", cur->name);
			goto out_err;
		}
	}

	if (lkdtm_parse_commandline() == -EINVAL) {
		pr_info("Invalid command\n");
		goto out_err;
	}

	if (cpoint != CN_INVALID && cptype != CT_NONE) {
		ret = lkdtm_register_cpoint(cpoint);
		if (ret < 0) {
			pr_info("Invalid crash point %d\n", cpoint);
			goto out_err;
		}
		pr_info("Crash point %s of type %s registered\n",
			cpoint_name, cpoint_type);
	} else {
		pr_info("No crash points registered, enable through debugfs\n");
	}

	return 0;

out_err:
	debugfs_remove_recursive(lkdtm_debugfs_root);
	return ret;
}

static void __exit lkdtm_module_exit(void)
{
	debugfs_remove_recursive(lkdtm_debugfs_root);

	unregister_jprobe(&lkdtm);
	pr_info("Crash point unregistered\n");
}

module_init(lkdtm_module_init);
module_exit(lkdtm_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kprobe module for testing crash dumps");