/* lib/debugobjects.c */
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #define pr_fmt(fmt) "ODEBUG: " fmt
  11. #include <linux/debugobjects.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/sched.h>
  14. #include <linux/sched/task_stack.h>
  15. #include <linux/seq_file.h>
  16. #include <linux/debugfs.h>
  17. #include <linux/slab.h>
  18. #include <linux/hash.h>
  19. #include <linux/kmemleak.h>
/* Hash table parameters: objects are hashed by chunk (page) address. */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Boot-time static pool size and the lazy-refill watermark. */
#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

/* Objects within the same PAGE_SIZE chunk map to the same hash bucket. */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: chain of tracked objects, protected by @lock. */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static pool used before the slab allocator is up. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool, obj_to_free and all pool counters below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
/* Objects detached from the hash, waiting for the worker to free them. */
static HLIST_HEAD(obj_to_free);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
/* The number of objs on the global free list */
static int			obj_nr_tofree;
static struct kmem_cache	*obj_cache;

/* Statistics, exported via debugfs. */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;

/* Descriptor used by the selftest; its objects never trigger warnings. */
static struct debug_obj_descr	*descr_test  __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
  61. static int __init enable_object_debug(char *str)
  62. {
  63. debug_objects_enabled = 1;
  64. return 0;
  65. }
  66. static int __init disable_object_debug(char *str)
  67. {
  68. debug_objects_enabled = 0;
  69. return 0;
  70. }
  71. early_param("debug_objects", enable_object_debug);
  72. early_param("no_debug_objects", disable_object_debug);
/* Human readable names for the tracking states, used in warnings. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
  81. static void fill_pool(void)
  82. {
  83. gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
  84. struct debug_obj *new, *obj;
  85. unsigned long flags;
  86. if (likely(obj_pool_free >= debug_objects_pool_min_level))
  87. return;
  88. /*
  89. * Reuse objs from the global free list; they will be reinitialized
  90. * when allocating.
  91. */
  92. while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
  93. raw_spin_lock_irqsave(&pool_lock, flags);
  94. /*
  95. * Recheck with the lock held as the worker thread might have
  96. * won the race and freed the global free list already.
  97. */
  98. if (obj_nr_tofree) {
  99. obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
  100. hlist_del(&obj->node);
  101. obj_nr_tofree--;
  102. hlist_add_head(&obj->node, &obj_pool);
  103. obj_pool_free++;
  104. }
  105. raw_spin_unlock_irqrestore(&pool_lock, flags);
  106. }
  107. if (unlikely(!obj_cache))
  108. return;
  109. while (obj_pool_free < debug_objects_pool_min_level) {
  110. new = kmem_cache_zalloc(obj_cache, gfp);
  111. if (!new)
  112. return;
  113. kmemleak_ignore(new);
  114. raw_spin_lock_irqsave(&pool_lock, flags);
  115. hlist_add_head(&new->node, &obj_pool);
  116. debug_objects_allocated++;
  117. obj_pool_free++;
  118. raw_spin_unlock_irqrestore(&pool_lock, flags);
  119. }
  120. }
  121. /*
  122. * Lookup an object in the hash bucket.
  123. */
  124. static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
  125. {
  126. struct debug_obj *obj;
  127. int cnt = 0;
  128. hlist_for_each_entry(obj, &b->list, node) {
  129. cnt++;
  130. if (obj->object == addr)
  131. return obj;
  132. }
  133. if (cnt > debug_objects_maxchain)
  134. debug_objects_maxchain = cnt;
  135. return NULL;
  136. }
  137. /*
  138. * Allocate a new object. If the pool is empty, switch off the debugger.
  139. * Must be called with interrupts disabled.
  140. */
  141. static struct debug_obj *
  142. alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
  143. {
  144. struct debug_obj *obj = NULL;
  145. raw_spin_lock(&pool_lock);
  146. if (obj_pool.first) {
  147. obj = hlist_entry(obj_pool.first, typeof(*obj), node);
  148. obj->object = addr;
  149. obj->descr = descr;
  150. obj->state = ODEBUG_STATE_NONE;
  151. obj->astate = 0;
  152. hlist_del(&obj->node);
  153. hlist_add_head(&obj->node, &b->list);
  154. obj_pool_used++;
  155. if (obj_pool_used > obj_pool_max_used)
  156. obj_pool_max_used = obj_pool_used;
  157. obj_pool_free--;
  158. if (obj_pool_free < obj_pool_min_free)
  159. obj_pool_min_free = obj_pool_free;
  160. }
  161. raw_spin_unlock(&pool_lock);
  162. return obj;
  163. }
  164. /*
  165. * workqueue function to free objects.
  166. *
  167. * To reduce contention on the global pool_lock, the actual freeing of
  168. * debug objects will be delayed if the pool_lock is busy. We also free
  169. * the objects in a batch of 4 for each lock/unlock cycle.
  170. */
  171. #define ODEBUG_FREE_BATCH 4
  172. static void free_obj_work(struct work_struct *work)
  173. {
  174. struct debug_obj *objs[ODEBUG_FREE_BATCH];
  175. struct hlist_node *tmp;
  176. struct debug_obj *obj;
  177. unsigned long flags;
  178. int i;
  179. HLIST_HEAD(tofree);
  180. if (!raw_spin_trylock_irqsave(&pool_lock, flags))
  181. return;
  182. /*
  183. * The objs on the pool list might be allocated before the work is
  184. * run, so recheck if pool list it full or not, if not fill pool
  185. * list from the global free list
  186. */
  187. while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
  188. obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
  189. hlist_del(&obj->node);
  190. hlist_add_head(&obj->node, &obj_pool);
  191. obj_pool_free++;
  192. obj_nr_tofree--;
  193. }
  194. /*
  195. * Pool list is already full and there are still objs on the free
  196. * list. Move remaining free objs to a temporary list to free the
  197. * memory outside the pool_lock held region.
  198. */
  199. if (obj_nr_tofree) {
  200. hlist_move_list(&obj_to_free, &tofree);
  201. obj_nr_tofree = 0;
  202. }
  203. while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
  204. for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
  205. objs[i] = hlist_entry(obj_pool.first,
  206. typeof(*objs[0]), node);
  207. hlist_del(&objs[i]->node);
  208. }
  209. obj_pool_free -= ODEBUG_FREE_BATCH;
  210. debug_objects_freed += ODEBUG_FREE_BATCH;
  211. /*
  212. * We release pool_lock across kmem_cache_free() to
  213. * avoid contention on pool_lock.
  214. */
  215. raw_spin_unlock_irqrestore(&pool_lock, flags);
  216. for (i = 0; i < ODEBUG_FREE_BATCH; i++)
  217. kmem_cache_free(obj_cache, objs[i]);
  218. if (!raw_spin_trylock_irqsave(&pool_lock, flags))
  219. return;
  220. }
  221. raw_spin_unlock_irqrestore(&pool_lock, flags);
  222. hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
  223. hlist_del(&obj->node);
  224. kmem_cache_free(obj_cache, obj);
  225. }
  226. }
  227. /*
  228. * Put the object back into the pool and schedule work to free objects
  229. * if necessary.
  230. */
  231. static void free_object(struct debug_obj *obj)
  232. {
  233. unsigned long flags;
  234. int sched = 0;
  235. raw_spin_lock_irqsave(&pool_lock, flags);
  236. /*
  237. * schedule work when the pool is filled and the cache is
  238. * initialized:
  239. */
  240. if (obj_pool_free > debug_objects_pool_size && obj_cache)
  241. sched = 1;
  242. hlist_add_head(&obj->node, &obj_pool);
  243. obj_pool_free++;
  244. obj_pool_used--;
  245. raw_spin_unlock_irqrestore(&pool_lock, flags);
  246. if (sched)
  247. schedule_work(&debug_obj_work);
  248. }
  249. /*
  250. * We run out of memory. That means we probably have tons of objects
  251. * allocated.
  252. */
  253. static void debug_objects_oom(void)
  254. {
  255. struct debug_bucket *db = obj_hash;
  256. struct hlist_node *tmp;
  257. HLIST_HEAD(freelist);
  258. struct debug_obj *obj;
  259. unsigned long flags;
  260. int i;
  261. pr_warn("Out of memory. ODEBUG disabled\n");
  262. for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
  263. raw_spin_lock_irqsave(&db->lock, flags);
  264. hlist_move_list(&db->list, &freelist);
  265. raw_spin_unlock_irqrestore(&db->lock, flags);
  266. /* Now free them */
  267. hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
  268. hlist_del(&obj->node);
  269. free_object(obj);
  270. }
  271. }
  272. }
  273. /*
  274. * We use the pfn of the address for the hash. That way we can check
  275. * for freed objects simply by checking the affected bucket.
  276. */
  277. static struct debug_bucket *get_bucket(unsigned long addr)
  278. {
  279. unsigned long hash;
  280. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  281. return &obj_hash[hash];
  282. }
/*
 * Emit a rate-limited warning describing a state machine violation on
 * @obj, with @msg naming the offending operation. Always bumps the
 * warning counter, even when the splat itself is suppressed.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Cap the WARN splat at five occurrences; selftest objects
	 * (descr_test) never warn. @limit is not atomic — a race may
	 * produce a couple of extra splats, which is harmless.
	 */
	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
  298. /*
  299. * Try to repair the damage, so we have a better chance to get useful
  300. * debug output.
  301. */
  302. static bool
  303. debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
  304. void * addr, enum debug_obj_state state)
  305. {
  306. if (fixup && fixup(addr, state)) {
  307. debug_objects_fixups++;
  308. return true;
  309. }
  310. return false;
  311. }
  312. static void debug_object_is_on_stack(void *addr, int onstack)
  313. {
  314. int is_on_stack;
  315. static int limit;
  316. if (limit > 4)
  317. return;
  318. is_on_stack = object_is_on_stack(addr);
  319. if (is_on_stack == onstack)
  320. return;
  321. limit++;
  322. if (is_on_stack)
  323. pr_warn("object is on stack, but not annotated\n");
  324. else
  325. pr_warn("object is not on stack, but annotated\n");
  326. WARN_ON(1);
  327. }
/*
 * Core of debug_object_init(): look up (or start tracking) @addr and
 * run the init-time state machine checks. @onstack is the caller's
 * claim about whether the object lives on a stack.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Refill the tracker-object pool before taking any locks. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: disable the whole facility. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		/* First sight: verify the on-stack annotation. */
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Re-init of an active object: report, then try to repair
		 * with the lock dropped (fixups may call back in here). */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  369. /**
  370. * debug_object_init - debug checks when an object is initialized
  371. * @addr: address of the object
  372. * @descr: pointer to an object specific debug description structure
  373. */
  374. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  375. {
  376. if (!debug_objects_enabled)
  377. return;
  378. __debug_object_init(addr, descr, 0);
  379. }
  380. EXPORT_SYMBOL_GPL(debug_object_init);
  381. /**
  382. * debug_object_init_on_stack - debug checks when an object on stack is
  383. * initialized
  384. * @addr: address of the object
  385. * @descr: pointer to an object specific debug description structure
  386. */
  387. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  388. {
  389. if (!debug_objects_enabled)
  390. return;
  391. __debug_object_init(addr, descr, 1);
  392. }
  393. EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	/* Stand-in object used for reporting when @addr is not tracked. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: report, unlock, try repair. */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. if true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		/* recurse once: the object is now tracked */
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A pending astate means the object is still busy. */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: report via a stand-in. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* Destroying an untracked object is not an error. */
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroy of an active object: report, unlock, repair. */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* Freeing an untracked object is fine - nothing to untrack. */
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Free of an active object: report, unlock, repair. */
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Untrack the object and give the tracker back. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* Stand-in used for reporting the untracked object. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	/* Object is tracked, i.e. initialized at some point - all good. */
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* Advance the caller's sub-state machine (astate). */
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: report via a stand-in. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
  653. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan all tracked objects inside [address, address + size) when the
 * backing memory is freed: warn and run fixup_free on still-active
 * objects, silently untrack the rest.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Round the range out to whole chunks; buckets are per chunk. */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				/*
				 * Active object being freed: report, drop
				 * the lock for the fixup callback, then
				 * rescan the bucket as it may have changed.
				 */
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;
}
  707. void debug_check_no_obj_freed(const void *address, unsigned long size)
  708. {
  709. if (debug_objects_enabled)
  710. __debug_check_no_obj_freed(address, size);
  711. }
  712. #endif
  713. #ifdef CONFIG_DEBUG_FS
/* seq_file show callback: dump all counters for debugfs "stats". */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
/* open() hook: wire the file up to the single-record seq_file helper. */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
/* File operations for the debugfs "stats" file (read-only seq_file). */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  739. static int __init debug_objects_init_debugfs(void)
  740. {
  741. struct dentry *dbgdir, *dbgstats;
  742. if (!debug_objects_enabled)
  743. return 0;
  744. dbgdir = debugfs_create_dir("debug_objects", NULL);
  745. if (!dbgdir)
  746. return -ENOMEM;
  747. dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
  748. &debug_stats_fops);
  749. if (!dbgstats)
  750. goto err;
  751. return 0;
  752. err:
  753. debugfs_remove(dbgdir);
  754. return -ENOMEM;
  755. }
  756. __initcall(debug_objects_init_debugfs);
  757. #else
  758. static inline void debug_objects_init_debugfs(void) { }
  759. #endif
  760. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* non-zero marks a "static" object */
	unsigned long	dummy2[3];
};

/* Forward declaration: the descriptor is defined after its callbacks. */
static __initdata struct debug_obj_descr descr_type_test;
  768. static bool __init is_static_object(void *addr)
  769. {
  770. struct self_test *obj = addr;
  771. return obj->static_init;
  772. }
  773. /*
  774. * fixup_init is called when:
  775. * - an active object is initialized
  776. */
  777. static bool __init fixup_init(void *addr, enum debug_obj_state state)
  778. {
  779. struct self_test *obj = addr;
  780. switch (state) {
  781. case ODEBUG_STATE_ACTIVE:
  782. debug_object_deactivate(obj, &descr_type_test);
  783. debug_object_init(obj, &descr_type_test);
  784. return true;
  785. default:
  786. return false;
  787. }
  788. }
  789. /*
  790. * fixup_activate is called when:
  791. * - an active object is activated
  792. * - an unknown non-static object is activated
  793. */
  794. static bool __init fixup_activate(void *addr, enum debug_obj_state state)
  795. {
  796. struct self_test *obj = addr;
  797. switch (state) {
  798. case ODEBUG_STATE_NOTAVAILABLE:
  799. return true;
  800. case ODEBUG_STATE_ACTIVE:
  801. debug_object_deactivate(obj, &descr_type_test);
  802. debug_object_activate(obj, &descr_type_test);
  803. return true;
  804. default:
  805. return false;
  806. }
  807. }
  808. /*
  809. * fixup_destroy is called when:
  810. * - an active object is destroyed
  811. */
  812. static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
  813. {
  814. struct self_test *obj = addr;
  815. switch (state) {
  816. case ODEBUG_STATE_ACTIVE:
  817. debug_object_deactivate(obj, &descr_type_test);
  818. debug_object_destroy(obj, &descr_type_test);
  819. return true;
  820. default:
  821. return false;
  822. }
  823. }
  824. /*
  825. * fixup_free is called when:
  826. * - an active object is freed
  827. */
  828. static bool __init fixup_free(void *addr, enum debug_obj_state state)
  829. {
  830. struct self_test *obj = addr;
  831. switch (state) {
  832. case ODEBUG_STATE_ACTIVE:
  833. debug_object_deactivate(obj, &descr_type_test);
  834. debug_object_free(obj, &descr_type_test);
  835. return true;
  836. default:
  837. return false;
  838. }
  839. }
/*
 * Verify the tracker state after a selftest step: the object at @addr
 * must be in @state, and the global fixup/warning counters must match
 * @fixups and @warnings. Returns 0 on success; on any mismatch it emits
 * a WARN, disables the object tracker and returns -EINVAL.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* A missing tracking entry is only valid when NONE is expected */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	/* Any selftest failure disables the tracker entirely */
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Descriptor wiring the selftest fixup callbacks into the core */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The object the selftest operates on; static_init is flipped later to
 * exercise the static-object code paths. */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Drive the object tracker state machine through all transitions with a
 * local test object and verify state and fixup/warning counters after
 * every step. Runs with interrupts disabled; the global counters are
 * restored on exit so the selftest does not pollute real statistics.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	/* Route the selftest object type to our descriptor */
	descr_test = &descr_type_test;

	/* init -> INIT */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	/* activate -> ACTIVE */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* double activate triggers fixup_activate() plus a warning */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	/* deactivate -> INACTIVE */
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	/* destroy -> DESTROYED */
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	/* operations on a destroyed object only warn, state is sticky */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	/* free removes the tracking entry -> NONE */
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Now pretend the object is statically initialized */
	obj.static_init = 1;
	/* activating an untracked static object succeeds silently */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* init of an active object triggers fixup_init() plus a warning */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* freeing the memory of an active object triggers fixup_free() */
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	/* Restore the counters the selftest incremented */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
  948. #else
/* Selftest disabled in Kconfig: provide a no-op stub */
static inline void debug_objects_selftest(void) { }
  950. #endif
  951. /*
  952. * Called during early boot to initialize the hash buckets and link
  953. * the static object pool objects into the poll list. After this call
  954. * the object tracker is fully operational.
  955. */
  956. void __init debug_objects_early_init(void)
  957. {
  958. int i;
  959. for (i = 0; i < ODEBUG_HASH_SIZE; i++)
  960. raw_spin_lock_init(&obj_hash[i].lock);
  961. for (i = 0; i < ODEBUG_POOL_SIZE; i++)
  962. hlist_add_head(&obj_static_pool[i].node, &obj_pool);
  963. }
  964. /*
  965. * Convert the statically allocated objects to dynamic ones:
  966. */
  967. static int __init debug_objects_replace_static_objects(void)
  968. {
  969. struct debug_bucket *db = obj_hash;
  970. struct hlist_node *tmp;
  971. struct debug_obj *obj, *new;
  972. HLIST_HEAD(objects);
  973. int i, cnt = 0;
  974. for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
  975. obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
  976. if (!obj)
  977. goto free;
  978. kmemleak_ignore(obj);
  979. hlist_add_head(&obj->node, &objects);
  980. }
  981. /*
  982. * When debug_objects_mem_init() is called we know that only
  983. * one CPU is up, so disabling interrupts is enough
  984. * protection. This avoids the lockdep hell of lock ordering.
  985. */
  986. local_irq_disable();
  987. /* Remove the statically allocated objects from the pool */
  988. hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
  989. hlist_del(&obj->node);
  990. /* Move the allocated objects to the pool */
  991. hlist_move_list(&objects, &obj_pool);
  992. /* Replace the active object references */
  993. for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
  994. hlist_move_list(&db->list, &objects);
  995. hlist_for_each_entry(obj, &objects, node) {
  996. new = hlist_entry(obj_pool.first, typeof(*obj), node);
  997. hlist_del(&new->node);
  998. /* copy object data */
  999. *new = *obj;
  1000. hlist_add_head(&new->node, &db->list);
  1001. cnt++;
  1002. }
  1003. }
  1004. local_irq_enable();
  1005. pr_debug("%d of %d active objects replaced\n",
  1006. cnt, obj_pool_used);
  1007. return 0;
  1008. free:
  1009. hlist_for_each_entry_safe(obj, tmp, &objects, node) {
  1010. hlist_del(&obj->node);
  1011. kmem_cache_free(obj_cache, obj);
  1012. }
  1013. return -ENOMEM;
  1014. }
  1015. /*
  1016. * Called after the kmem_caches are functional to setup a dedicated
  1017. * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  1018. * prevents that the debug code is called on kmem_cache_free() for the
  1019. * debug tracker objects to avoid recursive calls.
  1020. */
  1021. void __init debug_objects_mem_init(void)
  1022. {
  1023. if (!debug_objects_enabled)
  1024. return;
  1025. obj_cache = kmem_cache_create("debug_objects_cache",
  1026. sizeof (struct debug_obj), 0,
  1027. SLAB_DEBUG_OBJECTS, NULL);
  1028. if (!obj_cache || debug_objects_replace_static_objects()) {
  1029. debug_objects_enabled = 0;
  1030. if (obj_cache)
  1031. kmem_cache_destroy(obj_cache);
  1032. pr_warn("out of memory.\n");
  1033. } else
  1034. debug_objects_selftest();
  1035. /*
  1036. * Increase the thresholds for allocating and freeing objects
  1037. * according to the number of possible CPUs available in the system.
  1038. */
  1039. debug_objects_pool_size += num_possible_cpus() * 32;
  1040. debug_objects_pool_min_level += num_possible_cpus() * 4;
  1041. }