debugobjects.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #define pr_fmt(fmt) "ODEBUG: " fmt
  11. #include <linux/debugobjects.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/sched.h>
  14. #include <linux/sched/task_stack.h>
  15. #include <linux/seq_file.h>
  16. #include <linux/debugfs.h>
  17. #include <linux/slab.h>
  18. #include <linux/hash.h>
  19. #include <linux/kmemleak.h>
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

/* Address-space chunking: one hash slot covers one page-sized chunk. */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: chain of tracked objects protected by its own lock. */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Boot-time pool, used before the slab cache (obj_cache) exists. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and all obj_pool_* counters below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

/* Statistics, exported through debugfs "stats". */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
/* Descriptor used by the selftest; debug_print_object() skips it. */
static struct debug_obj_descr	*descr_test __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
  58. static int __init enable_object_debug(char *str)
  59. {
  60. debug_objects_enabled = 1;
  61. return 0;
  62. }
  63. static int __init disable_object_debug(char *str)
  64. {
  65. debug_objects_enabled = 0;
  66. return 0;
  67. }
  68. early_param("debug_objects", enable_object_debug);
  69. early_param("no_debug_objects", disable_object_debug);
/* Human-readable names for enum debug_obj_state, used in warnings. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
  78. static void fill_pool(void)
  79. {
  80. gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
  81. struct debug_obj *new;
  82. unsigned long flags;
  83. if (likely(obj_pool_free >= debug_objects_pool_min_level))
  84. return;
  85. if (unlikely(!obj_cache))
  86. return;
  87. while (obj_pool_free < debug_objects_pool_min_level) {
  88. new = kmem_cache_zalloc(obj_cache, gfp);
  89. if (!new)
  90. return;
  91. kmemleak_ignore(new);
  92. raw_spin_lock_irqsave(&pool_lock, flags);
  93. hlist_add_head(&new->node, &obj_pool);
  94. debug_objects_allocated++;
  95. obj_pool_free++;
  96. raw_spin_unlock_irqrestore(&pool_lock, flags);
  97. }
  98. }
  99. /*
  100. * Lookup an object in the hash bucket.
  101. */
  102. static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
  103. {
  104. struct debug_obj *obj;
  105. int cnt = 0;
  106. hlist_for_each_entry(obj, &b->list, node) {
  107. cnt++;
  108. if (obj->object == addr)
  109. return obj;
  110. }
  111. if (cnt > debug_objects_maxchain)
  112. debug_objects_maxchain = cnt;
  113. return NULL;
  114. }
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 *
 * Takes the first tracker object off the free pool, initializes it for
 * @addr/@descr and links it into bucket @b. Returns NULL when the pool
 * is empty (the caller then disables the facility).
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Maintain high/low water-mark statistics under pool_lock. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy. We also free
 * the objects in a batch of 4 for each lock/unlock cycle.
 */
#define ODEBUG_FREE_BATCH	4

static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *objs[ODEBUG_FREE_BATCH];
	unsigned long flags;
	int i;

	/* Best effort only: bail out instead of spinning on the lock. */
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;
	/* Shrink the pool while it holds a full batch above the target size. */
	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
		/* Detach one batch from the pool under the lock. */
		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
			objs[i] = hlist_entry(obj_pool.first,
					      typeof(*objs[0]), node);
			hlist_del(&objs[i]->node);
		}

		obj_pool_free -= ODEBUG_FREE_BATCH;
		debug_objects_freed += ODEBUG_FREE_BATCH;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		for (i = 0; i < ODEBUG_FREE_BATCH; i++)
			kmem_cache_free(obj_cache, objs[i]);
		/*
		 * Reacquire best-effort; on contention, give up — surplus
		 * objects stay pooled until the work runs again.
		 */
		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
			return;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
  177. /*
  178. * Put the object back into the pool and schedule work to free objects
  179. * if necessary.
  180. */
  181. static void free_object(struct debug_obj *obj)
  182. {
  183. unsigned long flags;
  184. int sched = 0;
  185. raw_spin_lock_irqsave(&pool_lock, flags);
  186. /*
  187. * schedule work when the pool is filled and the cache is
  188. * initialized:
  189. */
  190. if (obj_pool_free > debug_objects_pool_size && obj_cache)
  191. sched = 1;
  192. hlist_add_head(&obj->node, &obj_pool);
  193. obj_pool_free++;
  194. obj_pool_used--;
  195. raw_spin_unlock_irqrestore(&pool_lock, flags);
  196. if (sched)
  197. schedule_work(&debug_obj_work);
  198. }
  199. /*
  200. * We run out of memory. That means we probably have tons of objects
  201. * allocated.
  202. */
  203. static void debug_objects_oom(void)
  204. {
  205. struct debug_bucket *db = obj_hash;
  206. struct hlist_node *tmp;
  207. HLIST_HEAD(freelist);
  208. struct debug_obj *obj;
  209. unsigned long flags;
  210. int i;
  211. pr_warn("Out of memory. ODEBUG disabled\n");
  212. for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
  213. raw_spin_lock_irqsave(&db->lock, flags);
  214. hlist_move_list(&db->list, &freelist);
  215. raw_spin_unlock_irqrestore(&db->lock, flags);
  216. /* Now free them */
  217. hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
  218. hlist_del(&obj->node);
  219. free_object(obj);
  220. }
  221. }
  222. }
  223. /*
  224. * We use the pfn of the address for the hash. That way we can check
  225. * for freed objects simply by checking the affected bucket.
  226. */
  227. static struct debug_bucket *get_bucket(unsigned long addr)
  228. {
  229. unsigned long hash;
  230. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  231. return &obj_hash[hash];
  232. }
/*
 * Emit a warning describing a state-machine violation for @obj.
 * Rate limited to 5 WARNs overall; selftest objects (descr_test) are
 * never reported. Always bumps the warnings counter.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* Optional per-type hint (e.g. a callback address) for %pS. */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
			"object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
  248. /*
  249. * Try to repair the damage, so we have a better chance to get useful
  250. * debug output.
  251. */
  252. static bool
  253. debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
  254. void * addr, enum debug_obj_state state)
  255. {
  256. if (fixup && fixup(addr, state)) {
  257. debug_objects_fixups++;
  258. return true;
  259. }
  260. return false;
  261. }
  262. static void debug_object_is_on_stack(void *addr, int onstack)
  263. {
  264. int is_on_stack;
  265. static int limit;
  266. if (limit > 4)
  267. return;
  268. is_on_stack = object_is_on_stack(addr);
  269. if (is_on_stack == onstack)
  270. return;
  271. limit++;
  272. if (is_on_stack)
  273. pr_warn("object is on stack, but not annotated\n");
  274. else
  275. pr_warn("object is not on stack, but annotated\n");
  276. WARN_ON(1);
  277. }
/*
 * Core of debug_object_init()/debug_object_init_on_stack().
 * @onstack says whether the caller annotated the object as living on
 * the stack; this is cross-checked when the object is first tracked.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Opportunistically refill the tracker object pool. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: disable the whole facility. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/*
		 * Re-init of an active object: report it and run the type's
		 * fixup_init with the bucket lock dropped.
		 */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  319. /**
  320. * debug_object_init - debug checks when an object is initialized
  321. * @addr: address of the object
  322. * @descr: pointer to an object specific debug description structure
  323. */
  324. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  325. {
  326. if (!debug_objects_enabled)
  327. return;
  328. __debug_object_init(addr, descr, 0);
  329. }
  330. EXPORT_SYMBOL_GPL(debug_object_init);
  331. /**
  332. * debug_object_init_on_stack - debug checks when an object on stack is
  333. * initialized
  334. * @addr: address of the object
  335. * @descr: pointer to an object specific debug description structure
  336. */
  337. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  338. {
  339. if (!debug_objects_enabled)
  340. return;
  341. __debug_object_init(addr, descr, 1);
  342. }
  343. EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	/* Stand-in used for reporting when the object is not tracked. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: report and fix up unlocked. */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. if true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		/* recurse: the object is now tracked, first branch applies */
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
  410. /**
  411. * debug_object_deactivate - debug checks when an object is deactivated
  412. * @addr: address of the object
  413. * @descr: pointer to an object specific debug description structure
  414. */
  415. void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
  416. {
  417. struct debug_bucket *db;
  418. struct debug_obj *obj;
  419. unsigned long flags;
  420. if (!debug_objects_enabled)
  421. return;
  422. db = get_bucket((unsigned long) addr);
  423. raw_spin_lock_irqsave(&db->lock, flags);
  424. obj = lookup_object(addr, db);
  425. if (obj) {
  426. switch (obj->state) {
  427. case ODEBUG_STATE_INIT:
  428. case ODEBUG_STATE_INACTIVE:
  429. case ODEBUG_STATE_ACTIVE:
  430. if (!obj->astate)
  431. obj->state = ODEBUG_STATE_INACTIVE;
  432. else
  433. debug_print_object(obj, "deactivate");
  434. break;
  435. case ODEBUG_STATE_DESTROYED:
  436. debug_print_object(obj, "deactivate");
  437. break;
  438. default:
  439. break;
  440. }
  441. } else {
  442. struct debug_obj o = { .object = addr,
  443. .state = ODEBUG_STATE_NOTAVAILABLE,
  444. .descr = descr };
  445. debug_print_object(&o, "deactivate");
  446. }
  447. raw_spin_unlock_irqrestore(&db->lock, flags);
  448. }
  449. EXPORT_SYMBOL_GPL(debug_object_deactivate);
  450. /**
  451. * debug_object_destroy - debug checks when an object is destroyed
  452. * @addr: address of the object
  453. * @descr: pointer to an object specific debug description structure
  454. */
  455. void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
  456. {
  457. enum debug_obj_state state;
  458. struct debug_bucket *db;
  459. struct debug_obj *obj;
  460. unsigned long flags;
  461. if (!debug_objects_enabled)
  462. return;
  463. db = get_bucket((unsigned long) addr);
  464. raw_spin_lock_irqsave(&db->lock, flags);
  465. obj = lookup_object(addr, db);
  466. if (!obj)
  467. goto out_unlock;
  468. switch (obj->state) {
  469. case ODEBUG_STATE_NONE:
  470. case ODEBUG_STATE_INIT:
  471. case ODEBUG_STATE_INACTIVE:
  472. obj->state = ODEBUG_STATE_DESTROYED;
  473. break;
  474. case ODEBUG_STATE_ACTIVE:
  475. debug_print_object(obj, "destroy");
  476. state = obj->state;
  477. raw_spin_unlock_irqrestore(&db->lock, flags);
  478. debug_object_fixup(descr->fixup_destroy, addr, state);
  479. return;
  480. case ODEBUG_STATE_DESTROYED:
  481. debug_print_object(obj, "destroy");
  482. break;
  483. default:
  484. break;
  485. }
  486. out_unlock:
  487. raw_spin_unlock_irqrestore(&db->lock, flags);
  488. }
  489. EXPORT_SYMBOL_GPL(debug_object_destroy);
  490. /**
  491. * debug_object_free - debug checks when an object is freed
  492. * @addr: address of the object
  493. * @descr: pointer to an object specific debug description structure
  494. */
  495. void debug_object_free(void *addr, struct debug_obj_descr *descr)
  496. {
  497. enum debug_obj_state state;
  498. struct debug_bucket *db;
  499. struct debug_obj *obj;
  500. unsigned long flags;
  501. if (!debug_objects_enabled)
  502. return;
  503. db = get_bucket((unsigned long) addr);
  504. raw_spin_lock_irqsave(&db->lock, flags);
  505. obj = lookup_object(addr, db);
  506. if (!obj)
  507. goto out_unlock;
  508. switch (obj->state) {
  509. case ODEBUG_STATE_ACTIVE:
  510. debug_print_object(obj, "free");
  511. state = obj->state;
  512. raw_spin_unlock_irqrestore(&db->lock, flags);
  513. debug_object_fixup(descr->fixup_free, addr, state);
  514. return;
  515. default:
  516. hlist_del(&obj->node);
  517. raw_spin_unlock_irqrestore(&db->lock, flags);
  518. free_object(obj);
  519. return;
  520. }
  521. out_unlock:
  522. raw_spin_unlock_irqrestore(&db->lock, flags);
  523. }
  524. EXPORT_SYMBOL_GPL(debug_object_free);
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* Stand-in used for reporting the untracked object. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
  563. /**
  564. * debug_object_active_state - debug checks object usage state machine
  565. * @addr: address of the object
  566. * @descr: pointer to an object specific debug description structure
  567. * @expect: expected state
  568. * @next: state to move to if expected state is found
  569. */
  570. void
  571. debug_object_active_state(void *addr, struct debug_obj_descr *descr,
  572. unsigned int expect, unsigned int next)
  573. {
  574. struct debug_bucket *db;
  575. struct debug_obj *obj;
  576. unsigned long flags;
  577. if (!debug_objects_enabled)
  578. return;
  579. db = get_bucket((unsigned long) addr);
  580. raw_spin_lock_irqsave(&db->lock, flags);
  581. obj = lookup_object(addr, db);
  582. if (obj) {
  583. switch (obj->state) {
  584. case ODEBUG_STATE_ACTIVE:
  585. if (obj->astate == expect)
  586. obj->astate = next;
  587. else
  588. debug_print_object(obj, "active_state");
  589. break;
  590. default:
  591. debug_print_object(obj, "active_state");
  592. break;
  593. }
  594. } else {
  595. struct debug_obj o = { .object = addr,
  596. .state = ODEBUG_STATE_NOTAVAILABLE,
  597. .descr = descr };
  598. debug_print_object(&o, "active_state");
  599. }
  600. raw_spin_unlock_irqrestore(&db->lock, flags);
  601. }
  602. EXPORT_SYMBOL_GPL(debug_object_active_state);
  603. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan all hash buckets covering [address, address + size) for tracked
 * objects inside the freed memory region. Active objects are reported
 * and handed to their fixup_free handler; all others are removed and
 * recycled into the pool.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Number of page-sized chunks the region overlaps. */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				/* Lock was dropped: restart this bucket. */
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;
}
  657. void debug_check_no_obj_freed(const void *address, unsigned long size)
  658. {
  659. if (debug_objects_enabled)
  660. __debug_check_no_obj_freed(address, size);
  661. }
  662. #endif
  663. #ifdef CONFIG_DEBUG_FS
/* seq_file show handler: dump all statistics counters (debugfs "stats"). */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
	return 0;
}
/* open handler: bind the seq_file single-show callback. */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

/* File operations for the read-only debugfs "stats" file. */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  688. static int __init debug_objects_init_debugfs(void)
  689. {
  690. struct dentry *dbgdir, *dbgstats;
  691. if (!debug_objects_enabled)
  692. return 0;
  693. dbgdir = debugfs_create_dir("debug_objects", NULL);
  694. if (!dbgdir)
  695. return -ENOMEM;
  696. dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
  697. &debug_stats_fops);
  698. if (!dbgstats)
  699. goto err;
  700. return 0;
  701. err:
  702. debugfs_remove(dbgdir);
  703. return -ENOMEM;
  704. }
  705. __initcall(debug_objects_init_debugfs);
  706. #else
  707. static inline void debug_objects_init_debugfs(void) { }
  708. #endif
  709. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* non-zero: report as a static object */
	unsigned long	dummy2[3];
};

/* Forward declaration; defined below after the fixup handlers. */
static __initdata struct debug_obj_descr descr_type_test;
  717. static bool __init is_static_object(void *addr)
  718. {
  719. struct self_test *obj = addr;
  720. return obj->static_init;
  721. }
  722. /*
  723. * fixup_init is called when:
  724. * - an active object is initialized
  725. */
  726. static bool __init fixup_init(void *addr, enum debug_obj_state state)
  727. {
  728. struct self_test *obj = addr;
  729. switch (state) {
  730. case ODEBUG_STATE_ACTIVE:
  731. debug_object_deactivate(obj, &descr_type_test);
  732. debug_object_init(obj, &descr_type_test);
  733. return true;
  734. default:
  735. return false;
  736. }
  737. }
  738. /*
  739. * fixup_activate is called when:
  740. * - an active object is activated
  741. * - an unknown non-static object is activated
  742. */
  743. static bool __init fixup_activate(void *addr, enum debug_obj_state state)
  744. {
  745. struct self_test *obj = addr;
  746. switch (state) {
  747. case ODEBUG_STATE_NOTAVAILABLE:
  748. return true;
  749. case ODEBUG_STATE_ACTIVE:
  750. debug_object_deactivate(obj, &descr_type_test);
  751. debug_object_activate(obj, &descr_type_test);
  752. return true;
  753. default:
  754. return false;
  755. }
  756. }
  757. /*
  758. * fixup_destroy is called when:
  759. * - an active object is destroyed
  760. */
  761. static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
  762. {
  763. struct self_test *obj = addr;
  764. switch (state) {
  765. case ODEBUG_STATE_ACTIVE:
  766. debug_object_deactivate(obj, &descr_type_test);
  767. debug_object_destroy(obj, &descr_type_test);
  768. return true;
  769. default:
  770. return false;
  771. }
  772. }
  773. /*
  774. * fixup_free is called when:
  775. * - an active object is freed
  776. */
  777. static bool __init fixup_free(void *addr, enum debug_obj_state state)
  778. {
  779. struct self_test *obj = addr;
  780. switch (state) {
  781. case ODEBUG_STATE_ACTIVE:
  782. debug_object_deactivate(obj, &descr_type_test);
  783. debug_object_free(obj, &descr_type_test);
  784. return true;
  785. default:
  786. return false;
  787. }
  788. }
/*
 * Verify the tracker's view of @addr after a selftest step: the tracked
 * object must be in @state, and the global fixup/warning counters must
 * match the caller's expectation. Any mismatch disables the whole
 * facility and returns -EINVAL.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* ODEBUG_STATE_NONE means "expected to be untracked". */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/*
 * Descriptor wiring the selftest hooks together. debug_print_object()
 * suppresses warnings for this descriptor (via descr_test) while the
 * selftest deliberately provokes violations.
 */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The single object the selftest exercises. */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Boot-time selftest: drive the static test object through the whole
 * init/activate/deactivate/destroy/free life cycle and check after every
 * step that the tracker recorded the expected object state and that
 * exactly the expected number of fixups and warnings fired. Runs with
 * interrupts disabled; the counters are restored on exit so the test
 * leaves the global statistics untouched.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	/* Snapshot the global counters so they can be restored at "out:" */
	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Regular life cycle: init followed by activate, no fixups */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;

	/* Activating an already active object: one fixup, one warning */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;

	/* Deactivate and destroy are clean transitions, no fixups */
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;

	/*
	 * Operations on a destroyed object only warn; the object must
	 * stay in ODEBUG_STATE_DESTROYED.
	 */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;

	/* Freeing removes the object from the tracker */
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/*
	 * Pretend the object is statically initialized: activating an
	 * untracked static object must succeed without fixup/warning,
	 * while a subsequent init of the active object must trigger one
	 * fixup and one warning.
	 */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/*
	 * Freeing the memory of an active object must detect it and
	 * remove it from the tracker: one fixup, one warning.
	 */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	/* Restore the counters and detach the selftest descriptor */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;
	local_irq_restore(flags);
}
#else
/* Selftest not configured (see the #ifdef above): no-op stub */
static inline void debug_objects_selftest(void) { }
#endif
  900. /*
  901. * Called during early boot to initialize the hash buckets and link
  902. * the static object pool objects into the poll list. After this call
  903. * the object tracker is fully operational.
  904. */
  905. void __init debug_objects_early_init(void)
  906. {
  907. int i;
  908. for (i = 0; i < ODEBUG_HASH_SIZE; i++)
  909. raw_spin_lock_init(&obj_hash[i].lock);
  910. for (i = 0; i < ODEBUG_POOL_SIZE; i++)
  911. hlist_add_head(&obj_static_pool[i].node, &obj_pool);
  912. }
  913. /*
  914. * Convert the statically allocated objects to dynamic ones:
  915. */
  916. static int __init debug_objects_replace_static_objects(void)
  917. {
  918. struct debug_bucket *db = obj_hash;
  919. struct hlist_node *tmp;
  920. struct debug_obj *obj, *new;
  921. HLIST_HEAD(objects);
  922. int i, cnt = 0;
  923. for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
  924. obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
  925. if (!obj)
  926. goto free;
  927. kmemleak_ignore(obj);
  928. hlist_add_head(&obj->node, &objects);
  929. }
  930. /*
  931. * When debug_objects_mem_init() is called we know that only
  932. * one CPU is up, so disabling interrupts is enough
  933. * protection. This avoids the lockdep hell of lock ordering.
  934. */
  935. local_irq_disable();
  936. /* Remove the statically allocated objects from the pool */
  937. hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
  938. hlist_del(&obj->node);
  939. /* Move the allocated objects to the pool */
  940. hlist_move_list(&objects, &obj_pool);
  941. /* Replace the active object references */
  942. for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
  943. hlist_move_list(&db->list, &objects);
  944. hlist_for_each_entry(obj, &objects, node) {
  945. new = hlist_entry(obj_pool.first, typeof(*obj), node);
  946. hlist_del(&new->node);
  947. /* copy object data */
  948. *new = *obj;
  949. hlist_add_head(&new->node, &db->list);
  950. cnt++;
  951. }
  952. }
  953. local_irq_enable();
  954. pr_debug("%d of %d active objects replaced\n",
  955. cnt, obj_pool_used);
  956. return 0;
  957. free:
  958. hlist_for_each_entry_safe(obj, tmp, &objects, node) {
  959. hlist_del(&obj->node);
  960. kmem_cache_free(obj_cache, obj);
  961. }
  962. return -ENOMEM;
  963. }
  964. /*
  965. * Called after the kmem_caches are functional to setup a dedicated
  966. * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  967. * prevents that the debug code is called on kmem_cache_free() for the
  968. * debug tracker objects to avoid recursive calls.
  969. */
  970. void __init debug_objects_mem_init(void)
  971. {
  972. if (!debug_objects_enabled)
  973. return;
  974. obj_cache = kmem_cache_create("debug_objects_cache",
  975. sizeof (struct debug_obj), 0,
  976. SLAB_DEBUG_OBJECTS, NULL);
  977. if (!obj_cache || debug_objects_replace_static_objects()) {
  978. debug_objects_enabled = 0;
  979. if (obj_cache)
  980. kmem_cache_destroy(obj_cache);
  981. pr_warn("out of memory.\n");
  982. } else
  983. debug_objects_selftest();
  984. /*
  985. * Increase the thresholds for allocating and freeing objects
  986. * according to the number of possible CPUs available in the system.
  987. */
  988. debug_objects_pool_size += num_possible_cpus() * 32;
  989. debug_objects_pool_min_level += num_possible_cpus() * 4;
  990. }