debugobjects.c

/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
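
/*
 * Illustrative usage sketch (not part of this file and not taken from any
 * in-tree user): a client subsystem describes its object type with a
 * struct debug_obj_descr and calls the debug_object_*() helpers from its
 * own init/activate/deactivate/free paths. The "foo" type, the descriptor
 * and the fixup handler below are hypothetical.
 *
 *	struct foo { int active; };
 *
 *	static struct debug_obj_descr foo_debug_descr;
 *
 *	static int foo_fixup_free(void *addr, enum debug_obj_state state)
 *	{
 *		if (state == ODEBUG_STATE_ACTIVE) {
 *			debug_object_deactivate(addr, &foo_debug_descr);
 *			debug_object_free(addr, &foo_debug_descr);
 *			return 1;
 *		}
 *		return 0;
 *	}
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *		.fixup_free = foo_fixup_free,
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		f->active = 0;
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *		f->active = 1;
 *	}
 *
 *	void foo_stop(struct foo *f)
 *	{
 *		f->active = 0;
 *		debug_object_deactivate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_free(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *		kfree(f);
 *	}
 */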

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};

static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
static struct kmem_cache *obj_cache;

static int debug_objects_maxchain __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly
	= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr *descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
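
/*
 * Boot-time switches: "debug_objects" on the kernel command line enables
 * the tracker, "no_debug_objects" disables it, overriding the
 * CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT setting above.
 */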
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE] = "none",
	[ODEBUG_STATE_INIT] = "initialized",
	[ODEBUG_STATE_INACTIVE] = "inactive",
	[ODEBUG_STATE_ACTIVE] = "active",
	[ODEBUG_STATE_DESTROYED] = "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE] = "not available",
};
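
/*
 * Refill the object pool from the slab cache until ODEBUG_POOL_MIN_LEVEL
 * is reached again. Uses atomic, no-warn allocations so it is safe to
 * call from any context; it silently gives up if the allocation fails.
 */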
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up();
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the page-sized chunk index of the address for the hash. That
 * way we can check for freed objects simply by checking the affected
 * bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
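
/*
 * Report a state violation. The WARN() output is limited to five reports
 * (selftest objects excluded) to avoid flooding the log; every violation
 * is still counted in debug_objects_warnings.
 */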
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
			"object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static int
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	int fixed = 0;

	if (fixup)
		fixed = fixup(addr, state);
	debug_objects_fixups += fixed;
	return fixed;
}
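
/*
 * Check whether the tracking annotation matches the actual location of
 * the object: objects on the stack must be initialized via
 * debug_object_init_on_stack(), all others via debug_object_init().
 * Mismatches are reported, limited to five warnings.
 */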
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("ODEBUG: object is on stack, but not annotated\n");
	else
		pr_warn("ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}
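
/*
 * Core of debug_object_init() and debug_object_init_on_stack(): track
 * the object in its hash bucket and verify that "init" is a legal
 * transition for its current state.
 */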
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 *
 * Returns 0 on success, -EINVAL if a check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? -EINVAL : 0;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * legitimate.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			       ODEBUG_STATE_NOTAVAILABLE)) {
		debug_print_object(&o, "activate");
		return -EINVAL;
	}
	return 0;
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static. Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * @expect: expected state
 * @next: state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

#ifdef CONFIG_DEBUG_OBJECTS_FREE
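/*
 * Walk all hash buckets which can contain objects located in the freed
 * memory area [address, address + size), fix up objects which are still
 * active and remove the rest from the tracker.
 */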
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS
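/*
 * Expose the chain length, warning, fixup and pool counters via
 * <debugfs>/debug_objects/stats.
 */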
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open = debug_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long dummy1[6];
	int static_init;
	unsigned long dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			return 0;
		}
		return 1;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
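
/*
 * Selftest helper: verify that the test object is in the expected state
 * and that the global fixup and warning counters match the expected
 * values.
 */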
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name = "selftest",
	.fixup_init = fixup_init,
	.fixup_activate = fixup_activate,
	.fixup_destroy = fixup_destroy,
	.fixup_free = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };
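
/*
 * Run the state machine through a sequence of correct and incorrect
 * operations and verify the resulting state, fixup count and warning
 * count after each step.
 */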
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("ODEBUG: selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	return 0;

free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, which would lead to recursive
 * calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		pr_warn("ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}