/* Tracepoint definitions for btrfs (trace/events/btrfs.h). */
  1. #undef TRACE_SYSTEM
  2. #define TRACE_SYSTEM btrfs
  3. #if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ)
  4. #define _TRACE_BTRFS_H
  5. #include <linux/writeback.h>
  6. #include <linux/tracepoint.h>
  7. #include <trace/events/mmflags.h>
  8. struct btrfs_root;
  9. struct btrfs_fs_info;
  10. struct btrfs_inode;
  11. struct extent_map;
  12. struct btrfs_ordered_extent;
  13. struct btrfs_delayed_ref_node;
  14. struct btrfs_delayed_tree_ref;
  15. struct btrfs_delayed_data_ref;
  16. struct btrfs_delayed_ref_head;
  17. struct btrfs_block_group_cache;
  18. struct btrfs_free_cluster;
  19. struct map_lookup;
  20. struct extent_buffer;
  21. struct btrfs_work;
  22. struct __btrfs_workqueue;
  23. struct btrfs_qgroup_operation;
  24. #define show_ref_type(type) \
  25. __print_symbolic(type, \
  26. { BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
  27. { BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \
  28. { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \
  29. { BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \
  30. { BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
  31. #define __show_root_type(obj) \
  32. __print_symbolic_u64(obj, \
  33. { BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
  34. { BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
  35. { BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
  36. { BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" }, \
  37. { BTRFS_FS_TREE_OBJECTID, "FS_TREE" }, \
  38. { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \
  39. { BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \
  40. { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
  41. { BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \
  42. { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
  43. { BTRFS_UUID_TREE_OBJECTID, "UUID_TREE" }, \
  44. { BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" }, \
  45. { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
  46. #define show_root_type(obj) \
  47. obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \
  48. (obj >= BTRFS_ROOT_TREE_OBJECTID && \
  49. obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
  50. #define BTRFS_GROUP_FLAGS \
  51. { BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
  52. { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
  53. { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
  54. { BTRFS_BLOCK_GROUP_RAID0, "RAID0"}, \
  55. { BTRFS_BLOCK_GROUP_RAID1, "RAID1"}, \
  56. { BTRFS_BLOCK_GROUP_DUP, "DUP"}, \
  57. { BTRFS_BLOCK_GROUP_RAID10, "RAID10"}, \
  58. { BTRFS_BLOCK_GROUP_RAID5, "RAID5"}, \
  59. { BTRFS_BLOCK_GROUP_RAID6, "RAID6"}
  60. #define BTRFS_UUID_SIZE 16
  61. TRACE_EVENT(btrfs_transaction_commit,
  62. TP_PROTO(struct btrfs_root *root),
  63. TP_ARGS(root),
  64. TP_STRUCT__entry(
  65. __field( u64, generation )
  66. __field( u64, root_objectid )
  67. ),
  68. TP_fast_assign(
  69. __entry->generation = root->fs_info->generation;
  70. __entry->root_objectid = root->root_key.objectid;
  71. ),
  72. TP_printk("root = %llu(%s), gen = %llu",
  73. show_root_type(__entry->root_objectid),
  74. (unsigned long long)__entry->generation)
  75. );
  76. DECLARE_EVENT_CLASS(btrfs__inode,
  77. TP_PROTO(struct inode *inode),
  78. TP_ARGS(inode),
  79. TP_STRUCT__entry(
  80. __field( ino_t, ino )
  81. __field( blkcnt_t, blocks )
  82. __field( u64, disk_i_size )
  83. __field( u64, generation )
  84. __field( u64, last_trans )
  85. __field( u64, logged_trans )
  86. __field( u64, root_objectid )
  87. ),
  88. TP_fast_assign(
  89. __entry->ino = inode->i_ino;
  90. __entry->blocks = inode->i_blocks;
  91. __entry->disk_i_size = BTRFS_I(inode)->disk_i_size;
  92. __entry->generation = BTRFS_I(inode)->generation;
  93. __entry->last_trans = BTRFS_I(inode)->last_trans;
  94. __entry->logged_trans = BTRFS_I(inode)->logged_trans;
  95. __entry->root_objectid =
  96. BTRFS_I(inode)->root->root_key.objectid;
  97. ),
  98. TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
  99. "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
  100. show_root_type(__entry->root_objectid),
  101. (unsigned long long)__entry->generation,
  102. (unsigned long)__entry->ino,
  103. (unsigned long long)__entry->blocks,
  104. (unsigned long long)__entry->disk_i_size,
  105. (unsigned long long)__entry->last_trans,
  106. (unsigned long long)__entry->logged_trans)
  107. );
  108. DEFINE_EVENT(btrfs__inode, btrfs_inode_new,
  109. TP_PROTO(struct inode *inode),
  110. TP_ARGS(inode)
  111. );
  112. DEFINE_EVENT(btrfs__inode, btrfs_inode_request,
  113. TP_PROTO(struct inode *inode),
  114. TP_ARGS(inode)
  115. );
  116. DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
  117. TP_PROTO(struct inode *inode),
  118. TP_ARGS(inode)
  119. );
  120. #define __show_map_type(type) \
  121. __print_symbolic_u64(type, \
  122. { EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
  123. { EXTENT_MAP_HOLE, "HOLE" }, \
  124. { EXTENT_MAP_INLINE, "INLINE" }, \
  125. { EXTENT_MAP_DELALLOC, "DELALLOC" })
  126. #define show_map_type(type) \
  127. type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type)
  128. #define show_map_flags(flag) \
  129. __print_flags(flag, "|", \
  130. { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
  131. { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
  132. { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\
  133. { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
  134. { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
  135. { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
  136. { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" })
  137. TRACE_EVENT_CONDITION(btrfs_get_extent,
  138. TP_PROTO(struct btrfs_root *root, struct extent_map *map),
  139. TP_ARGS(root, map),
  140. TP_CONDITION(map),
  141. TP_STRUCT__entry(
  142. __field( u64, root_objectid )
  143. __field( u64, start )
  144. __field( u64, len )
  145. __field( u64, orig_start )
  146. __field( u64, block_start )
  147. __field( u64, block_len )
  148. __field( unsigned long, flags )
  149. __field( int, refs )
  150. __field( unsigned int, compress_type )
  151. ),
  152. TP_fast_assign(
  153. __entry->root_objectid = root->root_key.objectid;
  154. __entry->start = map->start;
  155. __entry->len = map->len;
  156. __entry->orig_start = map->orig_start;
  157. __entry->block_start = map->block_start;
  158. __entry->block_len = map->block_len;
  159. __entry->flags = map->flags;
  160. __entry->refs = atomic_read(&map->refs);
  161. __entry->compress_type = map->compress_type;
  162. ),
  163. TP_printk("root = %llu(%s), start = %llu, len = %llu, "
  164. "orig_start = %llu, block_start = %llu(%s), "
  165. "block_len = %llu, flags = %s, refs = %u, "
  166. "compress_type = %u",
  167. show_root_type(__entry->root_objectid),
  168. (unsigned long long)__entry->start,
  169. (unsigned long long)__entry->len,
  170. (unsigned long long)__entry->orig_start,
  171. show_map_type(__entry->block_start),
  172. (unsigned long long)__entry->block_len,
  173. show_map_flags(__entry->flags),
  174. __entry->refs, __entry->compress_type)
  175. );
  176. #define show_ordered_flags(flags) \
  177. __print_flags(flags, "|", \
  178. { (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \
  179. { (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" }, \
  180. { (1 << BTRFS_ORDERED_NOCOW), "NOCOW" }, \
  181. { (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" }, \
  182. { (1 << BTRFS_ORDERED_PREALLOC), "PREALLOC" }, \
  183. { (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \
  184. { (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \
  185. { (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \
  186. { (1 << BTRFS_ORDERED_LOGGED_CSUM), "LOGGED_CSUM" }, \
  187. { (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
  188. DECLARE_EVENT_CLASS(btrfs__ordered_extent,
  189. TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
  190. TP_ARGS(inode, ordered),
  191. TP_STRUCT__entry(
  192. __field( ino_t, ino )
  193. __field( u64, file_offset )
  194. __field( u64, start )
  195. __field( u64, len )
  196. __field( u64, disk_len )
  197. __field( u64, bytes_left )
  198. __field( unsigned long, flags )
  199. __field( int, compress_type )
  200. __field( int, refs )
  201. __field( u64, root_objectid )
  202. ),
  203. TP_fast_assign(
  204. __entry->ino = inode->i_ino;
  205. __entry->file_offset = ordered->file_offset;
  206. __entry->start = ordered->start;
  207. __entry->len = ordered->len;
  208. __entry->disk_len = ordered->disk_len;
  209. __entry->bytes_left = ordered->bytes_left;
  210. __entry->flags = ordered->flags;
  211. __entry->compress_type = ordered->compress_type;
  212. __entry->refs = atomic_read(&ordered->refs);
  213. __entry->root_objectid =
  214. BTRFS_I(inode)->root->root_key.objectid;
  215. ),
  216. TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, "
  217. "start = %llu, len = %llu, disk_len = %llu, "
  218. "bytes_left = %llu, flags = %s, compress_type = %d, "
  219. "refs = %d",
  220. show_root_type(__entry->root_objectid),
  221. (unsigned long long)__entry->ino,
  222. (unsigned long long)__entry->file_offset,
  223. (unsigned long long)__entry->start,
  224. (unsigned long long)__entry->len,
  225. (unsigned long long)__entry->disk_len,
  226. (unsigned long long)__entry->bytes_left,
  227. show_ordered_flags(__entry->flags),
  228. __entry->compress_type, __entry->refs)
  229. );
  230. DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
  231. TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
  232. TP_ARGS(inode, ordered)
  233. );
  234. DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
  235. TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
  236. TP_ARGS(inode, ordered)
  237. );
  238. DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
  239. TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
  240. TP_ARGS(inode, ordered)
  241. );
  242. DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
  243. TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
  244. TP_ARGS(inode, ordered)
  245. );
  246. DECLARE_EVENT_CLASS(btrfs__writepage,
  247. TP_PROTO(struct page *page, struct inode *inode,
  248. struct writeback_control *wbc),
  249. TP_ARGS(page, inode, wbc),
  250. TP_STRUCT__entry(
  251. __field( ino_t, ino )
  252. __field( pgoff_t, index )
  253. __field( long, nr_to_write )
  254. __field( long, pages_skipped )
  255. __field( loff_t, range_start )
  256. __field( loff_t, range_end )
  257. __field( char, for_kupdate )
  258. __field( char, for_reclaim )
  259. __field( char, range_cyclic )
  260. __field( pgoff_t, writeback_index )
  261. __field( u64, root_objectid )
  262. ),
  263. TP_fast_assign(
  264. __entry->ino = inode->i_ino;
  265. __entry->index = page->index;
  266. __entry->nr_to_write = wbc->nr_to_write;
  267. __entry->pages_skipped = wbc->pages_skipped;
  268. __entry->range_start = wbc->range_start;
  269. __entry->range_end = wbc->range_end;
  270. __entry->for_kupdate = wbc->for_kupdate;
  271. __entry->for_reclaim = wbc->for_reclaim;
  272. __entry->range_cyclic = wbc->range_cyclic;
  273. __entry->writeback_index = inode->i_mapping->writeback_index;
  274. __entry->root_objectid =
  275. BTRFS_I(inode)->root->root_key.objectid;
  276. ),
  277. TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
  278. "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
  279. "range_end = %llu, for_kupdate = %d, "
  280. "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
  281. show_root_type(__entry->root_objectid),
  282. (unsigned long)__entry->ino, __entry->index,
  283. __entry->nr_to_write, __entry->pages_skipped,
  284. __entry->range_start, __entry->range_end,
  285. __entry->for_kupdate,
  286. __entry->for_reclaim, __entry->range_cyclic,
  287. (unsigned long)__entry->writeback_index)
  288. );
  289. DEFINE_EVENT(btrfs__writepage, __extent_writepage,
  290. TP_PROTO(struct page *page, struct inode *inode,
  291. struct writeback_control *wbc),
  292. TP_ARGS(page, inode, wbc)
  293. );
  294. TRACE_EVENT(btrfs_writepage_end_io_hook,
  295. TP_PROTO(struct page *page, u64 start, u64 end, int uptodate),
  296. TP_ARGS(page, start, end, uptodate),
  297. TP_STRUCT__entry(
  298. __field( ino_t, ino )
  299. __field( pgoff_t, index )
  300. __field( u64, start )
  301. __field( u64, end )
  302. __field( int, uptodate )
  303. __field( u64, root_objectid )
  304. ),
  305. TP_fast_assign(
  306. __entry->ino = page->mapping->host->i_ino;
  307. __entry->index = page->index;
  308. __entry->start = start;
  309. __entry->end = end;
  310. __entry->uptodate = uptodate;
  311. __entry->root_objectid =
  312. BTRFS_I(page->mapping->host)->root->root_key.objectid;
  313. ),
  314. TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
  315. "end = %llu, uptodate = %d",
  316. show_root_type(__entry->root_objectid),
  317. (unsigned long)__entry->ino, (unsigned long)__entry->index,
  318. (unsigned long long)__entry->start,
  319. (unsigned long long)__entry->end, __entry->uptodate)
  320. );
  321. TRACE_EVENT(btrfs_sync_file,
  322. TP_PROTO(struct file *file, int datasync),
  323. TP_ARGS(file, datasync),
  324. TP_STRUCT__entry(
  325. __field( ino_t, ino )
  326. __field( ino_t, parent )
  327. __field( int, datasync )
  328. __field( u64, root_objectid )
  329. ),
  330. TP_fast_assign(
  331. struct dentry *dentry = file->f_path.dentry;
  332. struct inode *inode = d_inode(dentry);
  333. __entry->ino = inode->i_ino;
  334. __entry->parent = d_inode(dentry->d_parent)->i_ino;
  335. __entry->datasync = datasync;
  336. __entry->root_objectid =
  337. BTRFS_I(inode)->root->root_key.objectid;
  338. ),
  339. TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
  340. show_root_type(__entry->root_objectid),
  341. (unsigned long)__entry->ino, (unsigned long)__entry->parent,
  342. __entry->datasync)
  343. );
  344. TRACE_EVENT(btrfs_sync_fs,
  345. TP_PROTO(int wait),
  346. TP_ARGS(wait),
  347. TP_STRUCT__entry(
  348. __field( int, wait )
  349. ),
  350. TP_fast_assign(
  351. __entry->wait = wait;
  352. ),
  353. TP_printk("wait = %d", __entry->wait)
  354. );
  355. #define show_ref_action(action) \
  356. __print_symbolic(action, \
  357. { BTRFS_ADD_DELAYED_REF, "ADD_DELAYED_REF" }, \
  358. { BTRFS_DROP_DELAYED_REF, "DROP_DELAYED_REF" }, \
  359. { BTRFS_ADD_DELAYED_EXTENT, "ADD_DELAYED_EXTENT" }, \
  360. { BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" })
  361. DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
  362. TP_PROTO(struct btrfs_delayed_ref_node *ref,
  363. struct btrfs_delayed_tree_ref *full_ref,
  364. int action),
  365. TP_ARGS(ref, full_ref, action),
  366. TP_STRUCT__entry(
  367. __field( u64, bytenr )
  368. __field( u64, num_bytes )
  369. __field( int, action )
  370. __field( u64, parent )
  371. __field( u64, ref_root )
  372. __field( int, level )
  373. __field( int, type )
  374. __field( u64, seq )
  375. ),
  376. TP_fast_assign(
  377. __entry->bytenr = ref->bytenr;
  378. __entry->num_bytes = ref->num_bytes;
  379. __entry->action = action;
  380. __entry->parent = full_ref->parent;
  381. __entry->ref_root = full_ref->root;
  382. __entry->level = full_ref->level;
  383. __entry->type = ref->type;
  384. __entry->seq = ref->seq;
  385. ),
  386. TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
  387. "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
  388. "type = %s, seq = %llu",
  389. (unsigned long long)__entry->bytenr,
  390. (unsigned long long)__entry->num_bytes,
  391. show_ref_action(__entry->action),
  392. show_root_type(__entry->parent),
  393. show_root_type(__entry->ref_root),
  394. __entry->level, show_ref_type(__entry->type),
  395. (unsigned long long)__entry->seq)
  396. );
  397. DEFINE_EVENT(btrfs_delayed_tree_ref, add_delayed_tree_ref,
  398. TP_PROTO(struct btrfs_delayed_ref_node *ref,
  399. struct btrfs_delayed_tree_ref *full_ref,
  400. int action),
  401. TP_ARGS(ref, full_ref, action)
  402. );
  403. DEFINE_EVENT(btrfs_delayed_tree_ref, run_delayed_tree_ref,
  404. TP_PROTO(struct btrfs_delayed_ref_node *ref,
  405. struct btrfs_delayed_tree_ref *full_ref,
  406. int action),
  407. TP_ARGS(ref, full_ref, action)
  408. );
  409. DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
  410. TP_PROTO(struct btrfs_delayed_ref_node *ref,
  411. struct btrfs_delayed_data_ref *full_ref,
  412. int action),
  413. TP_ARGS(ref, full_ref, action),
  414. TP_STRUCT__entry(
  415. __field( u64, bytenr )
  416. __field( u64, num_bytes )
  417. __field( int, action )
  418. __field( u64, parent )
  419. __field( u64, ref_root )
  420. __field( u64, owner )
  421. __field( u64, offset )
  422. __field( int, type )
  423. __field( u64, seq )
  424. ),
  425. TP_fast_assign(
  426. __entry->bytenr = ref->bytenr;
  427. __entry->num_bytes = ref->num_bytes;
  428. __entry->action = action;
  429. __entry->parent = full_ref->parent;
  430. __entry->ref_root = full_ref->root;
  431. __entry->owner = full_ref->objectid;
  432. __entry->offset = full_ref->offset;
  433. __entry->type = ref->type;
  434. __entry->seq = ref->seq;
  435. ),
  436. TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
  437. "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
  438. "offset = %llu, type = %s, seq = %llu",
  439. (unsigned long long)__entry->bytenr,
  440. (unsigned long long)__entry->num_bytes,
  441. show_ref_action(__entry->action),
  442. show_root_type(__entry->parent),
  443. show_root_type(__entry->ref_root),
  444. (unsigned long long)__entry->owner,
  445. (unsigned long long)__entry->offset,
  446. show_ref_type(__entry->type),
  447. (unsigned long long)__entry->seq)
  448. );
  449. DEFINE_EVENT(btrfs_delayed_data_ref, add_delayed_data_ref,
  450. TP_PROTO(struct btrfs_delayed_ref_node *ref,
  451. struct btrfs_delayed_data_ref *full_ref,
  452. int action),
  453. TP_ARGS(ref, full_ref, action)
  454. );
  455. DEFINE_EVENT(btrfs_delayed_data_ref, run_delayed_data_ref,
  456. TP_PROTO(struct btrfs_delayed_ref_node *ref,
  457. struct btrfs_delayed_data_ref *full_ref,
  458. int action),
  459. TP_ARGS(ref, full_ref, action)
  460. );
  461. DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
  462. TP_PROTO(struct btrfs_delayed_ref_node *ref,
  463. struct btrfs_delayed_ref_head *head_ref,
  464. int action),
  465. TP_ARGS(ref, head_ref, action),
  466. TP_STRUCT__entry(
  467. __field( u64, bytenr )
  468. __field( u64, num_bytes )
  469. __field( int, action )
  470. __field( int, is_data )
  471. ),
  472. TP_fast_assign(
  473. __entry->bytenr = ref->bytenr;
  474. __entry->num_bytes = ref->num_bytes;
  475. __entry->action = action;
  476. __entry->is_data = head_ref->is_data;
  477. ),
  478. TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
  479. (unsigned long long)__entry->bytenr,
  480. (unsigned long long)__entry->num_bytes,
  481. show_ref_action(__entry->action),
  482. __entry->is_data)
  483. );
  484. DEFINE_EVENT(btrfs_delayed_ref_head, add_delayed_ref_head,
  485. TP_PROTO(struct btrfs_delayed_ref_node *ref,
  486. struct btrfs_delayed_ref_head *head_ref,
  487. int action),
  488. TP_ARGS(ref, head_ref, action)
  489. );
  490. DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
  491. TP_PROTO(struct btrfs_delayed_ref_node *ref,
  492. struct btrfs_delayed_ref_head *head_ref,
  493. int action),
  494. TP_ARGS(ref, head_ref, action)
  495. );
  496. #define show_chunk_type(type) \
  497. __print_flags(type, "|", \
  498. { BTRFS_BLOCK_GROUP_DATA, "DATA" }, \
  499. { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
  500. { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
  501. { BTRFS_BLOCK_GROUP_RAID0, "RAID0" }, \
  502. { BTRFS_BLOCK_GROUP_RAID1, "RAID1" }, \
  503. { BTRFS_BLOCK_GROUP_DUP, "DUP" }, \
  504. { BTRFS_BLOCK_GROUP_RAID10, "RAID10"}, \
  505. { BTRFS_BLOCK_GROUP_RAID5, "RAID5" }, \
  506. { BTRFS_BLOCK_GROUP_RAID6, "RAID6" })
  507. DECLARE_EVENT_CLASS(btrfs__chunk,
  508. TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
  509. u64 offset, u64 size),
  510. TP_ARGS(root, map, offset, size),
  511. TP_STRUCT__entry(
  512. __field( int, num_stripes )
  513. __field( u64, type )
  514. __field( int, sub_stripes )
  515. __field( u64, offset )
  516. __field( u64, size )
  517. __field( u64, root_objectid )
  518. ),
  519. TP_fast_assign(
  520. __entry->num_stripes = map->num_stripes;
  521. __entry->type = map->type;
  522. __entry->sub_stripes = map->sub_stripes;
  523. __entry->offset = offset;
  524. __entry->size = size;
  525. __entry->root_objectid = root->root_key.objectid;
  526. ),
  527. TP_printk("root = %llu(%s), offset = %llu, size = %llu, "
  528. "num_stripes = %d, sub_stripes = %d, type = %s",
  529. show_root_type(__entry->root_objectid),
  530. (unsigned long long)__entry->offset,
  531. (unsigned long long)__entry->size,
  532. __entry->num_stripes, __entry->sub_stripes,
  533. show_chunk_type(__entry->type))
  534. );
  535. DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
  536. TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
  537. u64 offset, u64 size),
  538. TP_ARGS(root, map, offset, size)
  539. );
  540. DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
  541. TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
  542. u64 offset, u64 size),
  543. TP_ARGS(root, map, offset, size)
  544. );
  545. TRACE_EVENT(btrfs_cow_block,
  546. TP_PROTO(struct btrfs_root *root, struct extent_buffer *buf,
  547. struct extent_buffer *cow),
  548. TP_ARGS(root, buf, cow),
  549. TP_STRUCT__entry(
  550. __field( u64, root_objectid )
  551. __field( u64, buf_start )
  552. __field( int, refs )
  553. __field( u64, cow_start )
  554. __field( int, buf_level )
  555. __field( int, cow_level )
  556. ),
  557. TP_fast_assign(
  558. __entry->root_objectid = root->root_key.objectid;
  559. __entry->buf_start = buf->start;
  560. __entry->refs = atomic_read(&buf->refs);
  561. __entry->cow_start = cow->start;
  562. __entry->buf_level = btrfs_header_level(buf);
  563. __entry->cow_level = btrfs_header_level(cow);
  564. ),
  565. TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu "
  566. "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
  567. show_root_type(__entry->root_objectid),
  568. __entry->refs,
  569. (unsigned long long)__entry->buf_start,
  570. __entry->buf_level,
  571. (unsigned long long)__entry->cow_start,
  572. __entry->cow_level)
  573. );
  574. TRACE_EVENT(btrfs_space_reservation,
  575. TP_PROTO(struct btrfs_fs_info *fs_info, char *type, u64 val,
  576. u64 bytes, int reserve),
  577. TP_ARGS(fs_info, type, val, bytes, reserve),
  578. TP_STRUCT__entry(
  579. __array( u8, fsid, BTRFS_UUID_SIZE )
  580. __string( type, type )
  581. __field( u64, val )
  582. __field( u64, bytes )
  583. __field( int, reserve )
  584. ),
  585. TP_fast_assign(
  586. memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE);
  587. __assign_str(type, type);
  588. __entry->val = val;
  589. __entry->bytes = bytes;
  590. __entry->reserve = reserve;
  591. ),
  592. TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type),
  593. __entry->val, __entry->reserve ? "reserve" : "release",
  594. __entry->bytes)
  595. );
  596. DECLARE_EVENT_CLASS(btrfs__reserved_extent,
  597. TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
  598. TP_ARGS(root, start, len),
  599. TP_STRUCT__entry(
  600. __field( u64, root_objectid )
  601. __field( u64, start )
  602. __field( u64, len )
  603. ),
  604. TP_fast_assign(
  605. __entry->root_objectid = root->root_key.objectid;
  606. __entry->start = start;
  607. __entry->len = len;
  608. ),
  609. TP_printk("root = %llu(%s), start = %llu, len = %llu",
  610. show_root_type(__entry->root_objectid),
  611. (unsigned long long)__entry->start,
  612. (unsigned long long)__entry->len)
  613. );
  614. DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc,
  615. TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
  616. TP_ARGS(root, start, len)
  617. );
  618. DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
  619. TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
  620. TP_ARGS(root, start, len)
  621. );
  622. TRACE_EVENT(find_free_extent,
  623. TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
  624. u64 data),
  625. TP_ARGS(root, num_bytes, empty_size, data),
  626. TP_STRUCT__entry(
  627. __field( u64, root_objectid )
  628. __field( u64, num_bytes )
  629. __field( u64, empty_size )
  630. __field( u64, data )
  631. ),
  632. TP_fast_assign(
  633. __entry->root_objectid = root->root_key.objectid;
  634. __entry->num_bytes = num_bytes;
  635. __entry->empty_size = empty_size;
  636. __entry->data = data;
  637. ),
  638. TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
  639. "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
  640. __entry->num_bytes, __entry->empty_size, __entry->data,
  641. __print_flags((unsigned long)__entry->data, "|",
  642. BTRFS_GROUP_FLAGS))
  643. );
  644. DECLARE_EVENT_CLASS(btrfs__reserve_extent,
  645. TP_PROTO(struct btrfs_root *root,
  646. struct btrfs_block_group_cache *block_group, u64 start,
  647. u64 len),
  648. TP_ARGS(root, block_group, start, len),
  649. TP_STRUCT__entry(
  650. __field( u64, root_objectid )
  651. __field( u64, bg_objectid )
  652. __field( u64, flags )
  653. __field( u64, start )
  654. __field( u64, len )
  655. ),
  656. TP_fast_assign(
  657. __entry->root_objectid = root->root_key.objectid;
  658. __entry->bg_objectid = block_group->key.objectid;
  659. __entry->flags = block_group->flags;
  660. __entry->start = start;
  661. __entry->len = len;
  662. ),
  663. TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
  664. "start = %Lu, len = %Lu",
  665. show_root_type(__entry->root_objectid), __entry->bg_objectid,
  666. __entry->flags, __print_flags((unsigned long)__entry->flags,
  667. "|", BTRFS_GROUP_FLAGS),
  668. __entry->start, __entry->len)
  669. );
  670. DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
  671. TP_PROTO(struct btrfs_root *root,
  672. struct btrfs_block_group_cache *block_group, u64 start,
  673. u64 len),
  674. TP_ARGS(root, block_group, start, len)
  675. );
  676. DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
  677. TP_PROTO(struct btrfs_root *root,
  678. struct btrfs_block_group_cache *block_group, u64 start,
  679. u64 len),
  680. TP_ARGS(root, block_group, start, len)
  681. );
  682. TRACE_EVENT(btrfs_find_cluster,
  683. TP_PROTO(struct btrfs_block_group_cache *block_group, u64 start,
  684. u64 bytes, u64 empty_size, u64 min_bytes),
  685. TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
  686. TP_STRUCT__entry(
  687. __field( u64, bg_objectid )
  688. __field( u64, flags )
  689. __field( u64, start )
  690. __field( u64, bytes )
  691. __field( u64, empty_size )
  692. __field( u64, min_bytes )
  693. ),
  694. TP_fast_assign(
  695. __entry->bg_objectid = block_group->key.objectid;
  696. __entry->flags = block_group->flags;
  697. __entry->start = start;
  698. __entry->bytes = bytes;
  699. __entry->empty_size = empty_size;
  700. __entry->min_bytes = min_bytes;
  701. ),
  702. TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
  703. " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
  704. __entry->flags,
  705. __print_flags((unsigned long)__entry->flags, "|",
  706. BTRFS_GROUP_FLAGS), __entry->start,
  707. __entry->bytes, __entry->empty_size, __entry->min_bytes)
  708. );
  709. TRACE_EVENT(btrfs_failed_cluster_setup,
  710. TP_PROTO(struct btrfs_block_group_cache *block_group),
  711. TP_ARGS(block_group),
  712. TP_STRUCT__entry(
  713. __field( u64, bg_objectid )
  714. ),
  715. TP_fast_assign(
  716. __entry->bg_objectid = block_group->key.objectid;
  717. ),
  718. TP_printk("block_group = %Lu", __entry->bg_objectid)
  719. );
  720. TRACE_EVENT(btrfs_setup_cluster,
  721. TP_PROTO(struct btrfs_block_group_cache *block_group,
  722. struct btrfs_free_cluster *cluster, u64 size, int bitmap),
  723. TP_ARGS(block_group, cluster, size, bitmap),
  724. TP_STRUCT__entry(
  725. __field( u64, bg_objectid )
  726. __field( u64, flags )
  727. __field( u64, start )
  728. __field( u64, max_size )
  729. __field( u64, size )
  730. __field( int, bitmap )
  731. ),
  732. TP_fast_assign(
  733. __entry->bg_objectid = block_group->key.objectid;
  734. __entry->flags = block_group->flags;
  735. __entry->start = cluster->window_start;
  736. __entry->max_size = cluster->max_size;
  737. __entry->size = size;
  738. __entry->bitmap = bitmap;
  739. ),
  740. TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
  741. "size = %Lu, max_size = %Lu, bitmap = %d",
  742. __entry->bg_objectid,
  743. __entry->flags,
  744. __print_flags((unsigned long)__entry->flags, "|",
  745. BTRFS_GROUP_FLAGS), __entry->start,
  746. __entry->size, __entry->max_size, __entry->bitmap)
  747. );
  748. struct extent_state;
  749. TRACE_EVENT(alloc_extent_state,
  750. TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
  751. TP_ARGS(state, mask, IP),
  752. TP_STRUCT__entry(
  753. __field(struct extent_state *, state)
  754. __field(gfp_t, mask)
  755. __field(unsigned long, ip)
  756. ),
  757. TP_fast_assign(
  758. __entry->state = state,
  759. __entry->mask = mask,
  760. __entry->ip = IP
  761. ),
  762. TP_printk("state=%p; mask = %s; caller = %pS", __entry->state,
  763. show_gfp_flags(__entry->mask), (void *)__entry->ip)
  764. );
  765. TRACE_EVENT(free_extent_state,
  766. TP_PROTO(struct extent_state *state, unsigned long IP),
  767. TP_ARGS(state, IP),
  768. TP_STRUCT__entry(
  769. __field(struct extent_state *, state)
  770. __field(unsigned long, ip)
  771. ),
  772. TP_fast_assign(
  773. __entry->state = state,
  774. __entry->ip = IP
  775. ),
  776. TP_printk(" state=%p; caller = %pS", __entry->state,
  777. (void *)__entry->ip)
  778. );
  779. DECLARE_EVENT_CLASS(btrfs__work,
  780. TP_PROTO(struct btrfs_work *work),
  781. TP_ARGS(work),
  782. TP_STRUCT__entry(
  783. __field( void *, work )
  784. __field( void *, wq )
  785. __field( void *, func )
  786. __field( void *, ordered_func )
  787. __field( void *, ordered_free )
  788. __field( void *, normal_work )
  789. ),
  790. TP_fast_assign(
  791. __entry->work = work;
  792. __entry->wq = work->wq;
  793. __entry->func = work->func;
  794. __entry->ordered_func = work->ordered_func;
  795. __entry->ordered_free = work->ordered_free;
  796. __entry->normal_work = &work->normal_work;
  797. ),
  798. TP_printk("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
  799. " ordered_free=%p",
  800. __entry->work, __entry->normal_work, __entry->wq,
  801. __entry->func, __entry->ordered_func, __entry->ordered_free)
  802. );
/* For situations that the work is freed */
  804. DECLARE_EVENT_CLASS(btrfs__work__done,
  805. TP_PROTO(struct btrfs_work *work),
  806. TP_ARGS(work),
  807. TP_STRUCT__entry(
  808. __field( void *, work )
  809. ),
  810. TP_fast_assign(
  811. __entry->work = work;
  812. ),
  813. TP_printk("work->%p", __entry->work)
  814. );
  815. DEFINE_EVENT(btrfs__work, btrfs_work_queued,
  816. TP_PROTO(struct btrfs_work *work),
  817. TP_ARGS(work)
  818. );
  819. DEFINE_EVENT(btrfs__work, btrfs_work_sched,
  820. TP_PROTO(struct btrfs_work *work),
  821. TP_ARGS(work)
  822. );
  823. DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
  824. TP_PROTO(struct btrfs_work *work),
  825. TP_ARGS(work)
  826. );
  827. DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
  828. TP_PROTO(struct btrfs_work *work),
  829. TP_ARGS(work)
  830. );
  831. DECLARE_EVENT_CLASS(btrfs__workqueue,
  832. TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high),
  833. TP_ARGS(wq, name, high),
  834. TP_STRUCT__entry(
  835. __field( void *, wq )
  836. __string( name, name )
  837. __field( int , high )
  838. ),
  839. TP_fast_assign(
  840. __entry->wq = wq;
  841. __assign_str(name, name);
  842. __entry->high = high;
  843. ),
  844. TP_printk("name=%s%s, wq=%p", __get_str(name),
  845. __print_flags(__entry->high, "",
  846. {(WQ_HIGHPRI), "-high"}),
  847. __entry->wq)
  848. );
  849. DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc,
  850. TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high),
  851. TP_ARGS(wq, name, high)
  852. );
  853. DECLARE_EVENT_CLASS(btrfs__workqueue_done,
  854. TP_PROTO(struct __btrfs_workqueue *wq),
  855. TP_ARGS(wq),
  856. TP_STRUCT__entry(
  857. __field( void *, wq )
  858. ),
  859. TP_fast_assign(
  860. __entry->wq = wq;
  861. ),
  862. TP_printk("wq=%p", __entry->wq)
  863. );
  864. DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
  865. TP_PROTO(struct __btrfs_workqueue *wq),
  866. TP_ARGS(wq)
  867. );
  868. DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
  869. TP_PROTO(struct inode *inode, u64 free_reserved),
  870. TP_ARGS(inode, free_reserved),
  871. TP_STRUCT__entry(
  872. __field( u64, rootid )
  873. __field( unsigned long, ino )
  874. __field( u64, free_reserved )
  875. ),
  876. TP_fast_assign(
  877. __entry->rootid = BTRFS_I(inode)->root->objectid;
  878. __entry->ino = inode->i_ino;
  879. __entry->free_reserved = free_reserved;
  880. ),
  881. TP_printk("rootid=%llu, ino=%lu, free_reserved=%llu",
  882. __entry->rootid, __entry->ino, __entry->free_reserved)
  883. );
  884. DEFINE_EVENT(btrfs__qgroup_data_map, btrfs_qgroup_init_data_rsv_map,
  885. TP_PROTO(struct inode *inode, u64 free_reserved),
  886. TP_ARGS(inode, free_reserved)
  887. );
  888. DEFINE_EVENT(btrfs__qgroup_data_map, btrfs_qgroup_free_data_rsv_map,
  889. TP_PROTO(struct inode *inode, u64 free_reserved),
  890. TP_ARGS(inode, free_reserved)
  891. );
  892. #define BTRFS_QGROUP_OPERATIONS \
  893. { QGROUP_RESERVE, "reserve" }, \
  894. { QGROUP_RELEASE, "release" }, \
  895. { QGROUP_FREE, "free" }
  896. DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
  897. TP_PROTO(struct inode *inode, u64 start, u64 len, u64 reserved, int op),
  898. TP_ARGS(inode, start, len, reserved, op),
  899. TP_STRUCT__entry(
  900. __field( u64, rootid )
  901. __field( unsigned long, ino )
  902. __field( u64, start )
  903. __field( u64, len )
  904. __field( u64, reserved )
  905. __field( int, op )
  906. ),
  907. TP_fast_assign(
  908. __entry->rootid = BTRFS_I(inode)->root->objectid;
  909. __entry->ino = inode->i_ino;
  910. __entry->start = start;
  911. __entry->len = len;
  912. __entry->reserved = reserved;
  913. __entry->op = op;
  914. ),
  915. TP_printk("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
  916. __entry->rootid, __entry->ino, __entry->start, __entry->len,
  917. __entry->reserved,
  918. __print_flags((unsigned long)__entry->op, "",
  919. BTRFS_QGROUP_OPERATIONS)
  920. )
  921. );
  922. DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_reserve_data,
  923. TP_PROTO(struct inode *inode, u64 start, u64 len, u64 reserved, int op),
  924. TP_ARGS(inode, start, len, reserved, op)
  925. );
  926. DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_release_data,
  927. TP_PROTO(struct inode *inode, u64 start, u64 len, u64 reserved, int op),
  928. TP_ARGS(inode, start, len, reserved, op)
  929. );
  930. DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
  931. TP_PROTO(u64 ref_root, u64 reserved),
  932. TP_ARGS(ref_root, reserved),
  933. TP_STRUCT__entry(
  934. __field( u64, ref_root )
  935. __field( u64, reserved )
  936. ),
  937. TP_fast_assign(
  938. __entry->ref_root = ref_root;
  939. __entry->reserved = reserved;
  940. ),
  941. TP_printk("root=%llu, reserved=%llu, op=free",
  942. __entry->ref_root, __entry->reserved)
  943. );
  944. DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref,
  945. TP_PROTO(u64 ref_root, u64 reserved),
  946. TP_ARGS(ref_root, reserved)
  947. );
#endif /* _TRACE_BTRFS_H */

/* This part must be outside protection */
#include <trace/define_trace.h>