/* lib/debugobjects.c */
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

/* Hash table geometry: 2^14 buckets, keyed by the object's address chunk. */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Default size and refill threshold of the tracker object pool. */
#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

/* Addresses within one page-sized chunk hash to the same bucket. */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: list of tracked objects protected by its own lock. */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static pool used before the slab allocator is available (early boot). */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the obj_pool_* accounting below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;

/* Non-NULL while the selftest runs; suppresses its expected warnings. */
static struct debug_obj_descr	*descr_test __read_mostly;

/*
 * Track numbers of kmem_cache_alloc and kmem_cache_free done.
 */
static int			debug_objects_alloc;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
/* Kernel command line "debug_objects": force-enable the tracker. */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

/* Kernel command line "no_debug_objects": force-disable the tracker. */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
/* Human readable names for the object states, used in warning output. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
  75. static void fill_pool(void)
  76. {
  77. gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
  78. struct debug_obj *new;
  79. unsigned long flags;
  80. if (likely(obj_pool_free >= debug_objects_pool_min_level))
  81. return;
  82. if (unlikely(!obj_cache))
  83. return;
  84. while (obj_pool_free < debug_objects_pool_min_level) {
  85. new = kmem_cache_zalloc(obj_cache, gfp);
  86. if (!new)
  87. return;
  88. raw_spin_lock_irqsave(&pool_lock, flags);
  89. hlist_add_head(&new->node, &obj_pool);
  90. debug_objects_alloc++;
  91. obj_pool_free++;
  92. raw_spin_unlock_irqrestore(&pool_lock, flags);
  93. }
  94. }
  95. /*
  96. * Lookup an object in the hash bucket.
  97. */
  98. static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
  99. {
  100. struct debug_obj *obj;
  101. int cnt = 0;
  102. hlist_for_each_entry(obj, &b->list, node) {
  103. cnt++;
  104. if (obj->object == addr)
  105. return obj;
  106. }
  107. if (cnt > debug_objects_maxchain)
  108. debug_objects_maxchain = cnt;
  109. return NULL;
  110. }
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		/* Move the object from the free pool into hash bucket @b. */
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Update pool accounting and the high/low water marks. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	/* NULL when the pool is exhausted; caller disables the tracker. */
	return obj;
}
/*
 * workqueue function to free objects.
 *
 * Shrinks the pool back down to debug_objects_pool_size by returning
 * surplus objects to the slab cache.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > debug_objects_pool_size) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		debug_objects_freed++;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		/* Re-take the lock; the loop condition is re-evaluated. */
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
  161. /*
  162. * Put the object back into the pool and schedule work to free objects
  163. * if necessary.
  164. */
  165. static void free_object(struct debug_obj *obj)
  166. {
  167. unsigned long flags;
  168. int sched = 0;
  169. raw_spin_lock_irqsave(&pool_lock, flags);
  170. /*
  171. * schedule work when the pool is filled and the cache is
  172. * initialized:
  173. */
  174. if (obj_pool_free > debug_objects_pool_size && obj_cache)
  175. sched = 1;
  176. hlist_add_head(&obj->node, &obj_pool);
  177. obj_pool_free++;
  178. obj_pool_used--;
  179. raw_spin_unlock_irqrestore(&pool_lock, flags);
  180. if (sched)
  181. schedule_work(&debug_obj_work);
  182. }
/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole bucket list under the bucket lock ... */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* ... then return the objects to the pool without it. */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
  207. /*
  208. * We use the pfn of the address for the hash. That way we can check
  209. * for freed objects simply by checking the affected bucket.
  210. */
  211. static struct debug_bucket *get_bucket(unsigned long addr)
  212. {
  213. unsigned long hash;
  214. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  215. return &obj_hash[hash];
  216. }
/*
 * Emit a WARN for a state machine violation on @obj. The warning is
 * rate limited to 5 splats per boot; the warning counter is bumped
 * unconditionally. Warnings for the selftest descriptor are suppressed.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
  232. /*
  233. * Try to repair the damage, so we have a better chance to get useful
  234. * debug output.
  235. */
  236. static bool
  237. debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
  238. void * addr, enum debug_obj_state state)
  239. {
  240. if (fixup && fixup(addr, state)) {
  241. debug_objects_fixups++;
  242. return true;
  243. }
  244. return false;
  245. }
  246. static void debug_object_is_on_stack(void *addr, int onstack)
  247. {
  248. int is_on_stack;
  249. static int limit;
  250. if (limit > 4)
  251. return;
  252. is_on_stack = object_is_on_stack(addr);
  253. if (is_on_stack == onstack)
  254. return;
  255. limit++;
  256. if (is_on_stack)
  257. pr_warn("object is on stack, but not annotated\n");
  258. else
  259. pr_warn("object is not on stack, but annotated\n");
  260. WARN_ON(1);
  261. }
/*
 * Core of debug_object_init(): track @addr in the hash and move it to
 * the INIT state, complaining when the transition is illegal.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Refill the free pool before taking any locks. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: switch the tracker off for good. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		/* Check the stack annotation only when first tracked. */
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Re-init of an active object: warn, drop lock, fix up. */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  303. /**
  304. * debug_object_init - debug checks when an object is initialized
  305. * @addr: address of the object
  306. * @descr: pointer to an object specific debug description structure
  307. */
  308. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  309. {
  310. if (!debug_objects_enabled)
  311. return;
  312. __debug_object_init(addr, descr, 0);
  313. }
  314. EXPORT_SYMBOL_GPL(debug_object_init);
  315. /**
  316. * debug_object_init_on_stack - debug checks when an object on stack is
  317. * initialized
  318. * @addr: address of the object
  319. * @descr: pointer to an object specific debug description structure
  320. */
  321. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  322. {
  323. if (!debug_objects_enabled)
  324. return;
  325. __debug_object_init(addr, descr, 1);
  326. }
  327. EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	/* Stand-in object for warnings when @addr is not tracked (yet). */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: warn and let the fixup decide. */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. if true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A non-zero astate means an extra state check is pending. */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: warn with a NOTAVAILABLE stand-in. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* Destroying an untracked object is not an error. */
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroying an active object: warn, drop lock, fix up. */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* Freeing an untracked object is not an error. */
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object: warn, drop lock, fix up. */
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Stop tracking and return the tracker to the pool. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* Stand-in for printing; @addr is not tracked. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * @expect: expected state
 * @next: state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* Advance astate only when it matches @expect. */
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: warn with a NOTAVAILABLE stand-in. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
  587. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan the freed memory range [address, address + size) for tracked
 * objects. Active objects trigger a warning and the fixup_free()
 * callback; all others are simply untracked.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	/* Number of hash chunks the range spans, rounded up. */
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				/* Active object freed: warn and fix up. */
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				/* Lock was dropped: rescan this bucket. */
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  638. void debug_check_no_obj_freed(const void *address, unsigned long size)
  639. {
  640. if (debug_objects_enabled)
  641. __debug_check_no_obj_freed(address, size);
  642. }
  643. #endif
  644. #ifdef CONFIG_DEBUG_FS
/* Render the collected counters for the debugfs "stats" file. */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "objects_alloc :%d\n", debug_objects_alloc);
	seq_printf(m, "objects_freed :%d\n", debug_objects_freed);
	return 0;
}
/* seq_file open hook for the stats file. */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
/* File operations for the read-only debugfs stats file. */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  668. static int __init debug_objects_init_debugfs(void)
  669. {
  670. struct dentry *dbgdir, *dbgstats;
  671. if (!debug_objects_enabled)
  672. return 0;
  673. dbgdir = debugfs_create_dir("debug_objects", NULL);
  674. if (!dbgdir)
  675. return -ENOMEM;
  676. dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
  677. &debug_stats_fops);
  678. if (!dbgstats)
  679. goto err;
  680. return 0;
  681. err:
  682. debugfs_remove(dbgdir);
  683. return -ENOMEM;
  684. }
  685. __initcall(debug_objects_init_debugfs);
  686. #else
  687. static inline void debug_objects_init_debugfs(void) { }
  688. #endif
  689. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* non-zero -> treated as static object */
	unsigned long	dummy2[3];
};

/* Forward declaration; the descriptor references the fixups below. */
static __initdata struct debug_obj_descr descr_type_test;
  697. static bool __init is_static_object(void *addr)
  698. {
  699. struct self_test *obj = addr;
  700. return obj->static_init;
  701. }
  702. /*
  703. * fixup_init is called when:
  704. * - an active object is initialized
  705. */
  706. static bool __init fixup_init(void *addr, enum debug_obj_state state)
  707. {
  708. struct self_test *obj = addr;
  709. switch (state) {
  710. case ODEBUG_STATE_ACTIVE:
  711. debug_object_deactivate(obj, &descr_type_test);
  712. debug_object_init(obj, &descr_type_test);
  713. return true;
  714. default:
  715. return false;
  716. }
  717. }
  718. /*
  719. * fixup_activate is called when:
  720. * - an active object is activated
  721. * - an unknown non-static object is activated
  722. */
  723. static bool __init fixup_activate(void *addr, enum debug_obj_state state)
  724. {
  725. struct self_test *obj = addr;
  726. switch (state) {
  727. case ODEBUG_STATE_NOTAVAILABLE:
  728. return true;
  729. case ODEBUG_STATE_ACTIVE:
  730. debug_object_deactivate(obj, &descr_type_test);
  731. debug_object_activate(obj, &descr_type_test);
  732. return true;
  733. default:
  734. return false;
  735. }
  736. }
  737. /*
  738. * fixup_destroy is called when:
  739. * - an active object is destroyed
  740. */
  741. static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
  742. {
  743. struct self_test *obj = addr;
  744. switch (state) {
  745. case ODEBUG_STATE_ACTIVE:
  746. debug_object_deactivate(obj, &descr_type_test);
  747. debug_object_destroy(obj, &descr_type_test);
  748. return true;
  749. default:
  750. return false;
  751. }
  752. }
  753. /*
  754. * fixup_free is called when:
  755. * - an active object is freed
  756. */
  757. static bool __init fixup_free(void *addr, enum debug_obj_state state)
  758. {
  759. struct self_test *obj = addr;
  760. switch (state) {
  761. case ODEBUG_STATE_ACTIVE:
  762. debug_object_deactivate(obj, &descr_type_test);
  763. debug_object_free(obj, &descr_type_test);
  764. return true;
  765. default:
  766. return false;
  767. }
  768. }
/*
 * Selftest helper: verify that the object at @addr is in @state and
 * that the global fixup/warning counters match the expected values.
 * Disables debugobjects on any mismatch.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Descriptor wired to the __init selftest fixup callbacks above. */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The tracked test object; static_init is toggled during the test. */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Exercise the state machine at boot: legal transitions, double
 * activation, operations on a destroyed object, static objects and
 * (optionally) freeing memory that holds an active object.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Normal lifecycle: init -> activate. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;

	/* Double activation must warn and be fixed up. */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;

	/* Operations on a destroyed object must warn, state stays DESTROYED. */
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Static object: activation must auto-track it. */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory holding an active object must warn and fix up. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	/* Restore the counters so the selftest does not skew the stats. */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
/* Selftest not configured: provide a no-op stub */
static inline void debug_objects_selftest(void) { }
#endif
  880. /*
  881. * Called during early boot to initialize the hash buckets and link
  882. * the static object pool objects into the poll list. After this call
  883. * the object tracker is fully operational.
  884. */
  885. void __init debug_objects_early_init(void)
  886. {
  887. int i;
  888. for (i = 0; i < ODEBUG_HASH_SIZE; i++)
  889. raw_spin_lock_init(&obj_hash[i].lock);
  890. for (i = 0; i < ODEBUG_POOL_SIZE; i++)
  891. hlist_add_head(&obj_static_pool[i].node, &obj_pool);
  892. }
/*
 * Convert the statically allocated objects to dynamic ones.
 *
 * Pre-allocates one cache object per static pool slot, swaps the pool
 * over to the new allocations and rewrites every hash-bucket entry that
 * still references a static object. Returns 0 on success, -ENOMEM when
 * any allocation fails (in which case everything allocated so far is
 * released and the pool is left untouched).
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/* Allocate the full replacement set up front, before touching state */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the bucket's chain of static objects for a walk */
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			/* Grab a dynamic object from the refilled pool */
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;

free:
	/* Roll back: free everything allocated before the failure */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
  943. /*
  944. * Called after the kmem_caches are functional to setup a dedicated
  945. * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  946. * prevents that the debug code is called on kmem_cache_free() for the
  947. * debug tracker objects to avoid recursive calls.
  948. */
  949. void __init debug_objects_mem_init(void)
  950. {
  951. if (!debug_objects_enabled)
  952. return;
  953. obj_cache = kmem_cache_create("debug_objects_cache",
  954. sizeof (struct debug_obj), 0,
  955. SLAB_DEBUG_OBJECTS, NULL);
  956. if (!obj_cache || debug_objects_replace_static_objects()) {
  957. debug_objects_enabled = 0;
  958. if (obj_cache)
  959. kmem_cache_destroy(obj_cache);
  960. pr_warn("out of memory.\n");
  961. } else
  962. debug_objects_selftest();
  963. /*
  964. * Increase the thresholds for allocating and freeing objects
  965. * according to the number of possible CPUs available in the system.
  966. */
  967. debug_objects_pool_size += num_possible_cpus() * 32;
  968. debug_objects_pool_min_level += num_possible_cpus() * 4;
  969. }