/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs_dax.h - tracepoint definitions for the DAX (direct access) fault,
 * mapping-insertion and writeback paths.  Included via the usual
 * trace/define_trace.h multi-read mechanism.
 */
  1. #undef TRACE_SYSTEM
  2. #define TRACE_SYSTEM fs_dax
  3. #if !defined(_TRACE_FS_DAX_H) || defined(TRACE_HEADER_MULTI_READ)
  4. #define _TRACE_FS_DAX_H
  5. #include <linux/tracepoint.h>
  6. DECLARE_EVENT_CLASS(dax_pmd_fault_class,
  7. TP_PROTO(struct inode *inode, struct vm_fault *vmf,
  8. pgoff_t max_pgoff, int result),
  9. TP_ARGS(inode, vmf, max_pgoff, result),
  10. TP_STRUCT__entry(
  11. __field(unsigned long, ino)
  12. __field(unsigned long, vm_start)
  13. __field(unsigned long, vm_end)
  14. __field(unsigned long, vm_flags)
  15. __field(unsigned long, address)
  16. __field(pgoff_t, pgoff)
  17. __field(pgoff_t, max_pgoff)
  18. __field(dev_t, dev)
  19. __field(unsigned int, flags)
  20. __field(int, result)
  21. ),
  22. TP_fast_assign(
  23. __entry->dev = inode->i_sb->s_dev;
  24. __entry->ino = inode->i_ino;
  25. __entry->vm_start = vmf->vma->vm_start;
  26. __entry->vm_end = vmf->vma->vm_end;
  27. __entry->vm_flags = vmf->vma->vm_flags;
  28. __entry->address = vmf->address;
  29. __entry->flags = vmf->flags;
  30. __entry->pgoff = vmf->pgoff;
  31. __entry->max_pgoff = max_pgoff;
  32. __entry->result = result;
  33. ),
  34. TP_printk("dev %d:%d ino %#lx %s %s address %#lx vm_start "
  35. "%#lx vm_end %#lx pgoff %#lx max_pgoff %#lx %s",
  36. MAJOR(__entry->dev),
  37. MINOR(__entry->dev),
  38. __entry->ino,
  39. __entry->vm_flags & VM_SHARED ? "shared" : "private",
  40. __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
  41. __entry->address,
  42. __entry->vm_start,
  43. __entry->vm_end,
  44. __entry->pgoff,
  45. __entry->max_pgoff,
  46. __print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE)
  47. )
  48. )
  49. #define DEFINE_PMD_FAULT_EVENT(name) \
  50. DEFINE_EVENT(dax_pmd_fault_class, name, \
  51. TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
  52. pgoff_t max_pgoff, int result), \
  53. TP_ARGS(inode, vmf, max_pgoff, result))
  54. DEFINE_PMD_FAULT_EVENT(dax_pmd_fault);
  55. DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done);
  56. DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
  57. TP_PROTO(struct inode *inode, struct vm_fault *vmf,
  58. struct page *zero_page,
  59. void *radix_entry),
  60. TP_ARGS(inode, vmf, zero_page, radix_entry),
  61. TP_STRUCT__entry(
  62. __field(unsigned long, ino)
  63. __field(unsigned long, vm_flags)
  64. __field(unsigned long, address)
  65. __field(struct page *, zero_page)
  66. __field(void *, radix_entry)
  67. __field(dev_t, dev)
  68. ),
  69. TP_fast_assign(
  70. __entry->dev = inode->i_sb->s_dev;
  71. __entry->ino = inode->i_ino;
  72. __entry->vm_flags = vmf->vma->vm_flags;
  73. __entry->address = vmf->address;
  74. __entry->zero_page = zero_page;
  75. __entry->radix_entry = radix_entry;
  76. ),
  77. TP_printk("dev %d:%d ino %#lx %s address %#lx zero_page %p "
  78. "radix_entry %#lx",
  79. MAJOR(__entry->dev),
  80. MINOR(__entry->dev),
  81. __entry->ino,
  82. __entry->vm_flags & VM_SHARED ? "shared" : "private",
  83. __entry->address,
  84. __entry->zero_page,
  85. (unsigned long)__entry->radix_entry
  86. )
  87. )
  88. #define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
  89. DEFINE_EVENT(dax_pmd_load_hole_class, name, \
  90. TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
  91. struct page *zero_page, void *radix_entry), \
  92. TP_ARGS(inode, vmf, zero_page, radix_entry))
  93. DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
  94. DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);
  95. DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
  96. TP_PROTO(struct inode *inode, struct vm_fault *vmf,
  97. long length, pfn_t pfn, void *radix_entry),
  98. TP_ARGS(inode, vmf, length, pfn, radix_entry),
  99. TP_STRUCT__entry(
  100. __field(unsigned long, ino)
  101. __field(unsigned long, vm_flags)
  102. __field(unsigned long, address)
  103. __field(long, length)
  104. __field(u64, pfn_val)
  105. __field(void *, radix_entry)
  106. __field(dev_t, dev)
  107. __field(int, write)
  108. ),
  109. TP_fast_assign(
  110. __entry->dev = inode->i_sb->s_dev;
  111. __entry->ino = inode->i_ino;
  112. __entry->vm_flags = vmf->vma->vm_flags;
  113. __entry->address = vmf->address;
  114. __entry->write = vmf->flags & FAULT_FLAG_WRITE;
  115. __entry->length = length;
  116. __entry->pfn_val = pfn.val;
  117. __entry->radix_entry = radix_entry;
  118. ),
  119. TP_printk("dev %d:%d ino %#lx %s %s address %#lx length %#lx "
  120. "pfn %#llx %s radix_entry %#lx",
  121. MAJOR(__entry->dev),
  122. MINOR(__entry->dev),
  123. __entry->ino,
  124. __entry->vm_flags & VM_SHARED ? "shared" : "private",
  125. __entry->write ? "write" : "read",
  126. __entry->address,
  127. __entry->length,
  128. __entry->pfn_val & ~PFN_FLAGS_MASK,
  129. __print_flags_u64(__entry->pfn_val & PFN_FLAGS_MASK, "|",
  130. PFN_FLAGS_TRACE),
  131. (unsigned long)__entry->radix_entry
  132. )
  133. )
  134. #define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \
  135. DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
  136. TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
  137. long length, pfn_t pfn, void *radix_entry), \
  138. TP_ARGS(inode, vmf, length, pfn, radix_entry))
  139. DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
  140. DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback);
  141. DECLARE_EVENT_CLASS(dax_pte_fault_class,
  142. TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
  143. TP_ARGS(inode, vmf, result),
  144. TP_STRUCT__entry(
  145. __field(unsigned long, ino)
  146. __field(unsigned long, vm_flags)
  147. __field(unsigned long, address)
  148. __field(pgoff_t, pgoff)
  149. __field(dev_t, dev)
  150. __field(unsigned int, flags)
  151. __field(int, result)
  152. ),
  153. TP_fast_assign(
  154. __entry->dev = inode->i_sb->s_dev;
  155. __entry->ino = inode->i_ino;
  156. __entry->vm_flags = vmf->vma->vm_flags;
  157. __entry->address = vmf->address;
  158. __entry->flags = vmf->flags;
  159. __entry->pgoff = vmf->pgoff;
  160. __entry->result = result;
  161. ),
  162. TP_printk("dev %d:%d ino %#lx %s %s address %#lx pgoff %#lx %s",
  163. MAJOR(__entry->dev),
  164. MINOR(__entry->dev),
  165. __entry->ino,
  166. __entry->vm_flags & VM_SHARED ? "shared" : "private",
  167. __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
  168. __entry->address,
  169. __entry->pgoff,
  170. __print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE)
  171. )
  172. )
  173. #define DEFINE_PTE_FAULT_EVENT(name) \
  174. DEFINE_EVENT(dax_pte_fault_class, name, \
  175. TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result), \
  176. TP_ARGS(inode, vmf, result))
  177. DEFINE_PTE_FAULT_EVENT(dax_pte_fault);
  178. DEFINE_PTE_FAULT_EVENT(dax_pte_fault_done);
  179. DEFINE_PTE_FAULT_EVENT(dax_load_hole);
  180. TRACE_EVENT(dax_insert_mapping,
  181. TP_PROTO(struct inode *inode, struct vm_fault *vmf, void *radix_entry),
  182. TP_ARGS(inode, vmf, radix_entry),
  183. TP_STRUCT__entry(
  184. __field(unsigned long, ino)
  185. __field(unsigned long, vm_flags)
  186. __field(unsigned long, address)
  187. __field(void *, radix_entry)
  188. __field(dev_t, dev)
  189. __field(int, write)
  190. ),
  191. TP_fast_assign(
  192. __entry->dev = inode->i_sb->s_dev;
  193. __entry->ino = inode->i_ino;
  194. __entry->vm_flags = vmf->vma->vm_flags;
  195. __entry->address = vmf->address;
  196. __entry->write = vmf->flags & FAULT_FLAG_WRITE;
  197. __entry->radix_entry = radix_entry;
  198. ),
  199. TP_printk("dev %d:%d ino %#lx %s %s address %#lx radix_entry %#lx",
  200. MAJOR(__entry->dev),
  201. MINOR(__entry->dev),
  202. __entry->ino,
  203. __entry->vm_flags & VM_SHARED ? "shared" : "private",
  204. __entry->write ? "write" : "read",
  205. __entry->address,
  206. (unsigned long)__entry->radix_entry
  207. )
  208. )
  209. DECLARE_EVENT_CLASS(dax_writeback_range_class,
  210. TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),
  211. TP_ARGS(inode, start_index, end_index),
  212. TP_STRUCT__entry(
  213. __field(unsigned long, ino)
  214. __field(pgoff_t, start_index)
  215. __field(pgoff_t, end_index)
  216. __field(dev_t, dev)
  217. ),
  218. TP_fast_assign(
  219. __entry->dev = inode->i_sb->s_dev;
  220. __entry->ino = inode->i_ino;
  221. __entry->start_index = start_index;
  222. __entry->end_index = end_index;
  223. ),
  224. TP_printk("dev %d:%d ino %#lx pgoff %#lx-%#lx",
  225. MAJOR(__entry->dev),
  226. MINOR(__entry->dev),
  227. __entry->ino,
  228. __entry->start_index,
  229. __entry->end_index
  230. )
  231. )
  232. #define DEFINE_WRITEBACK_RANGE_EVENT(name) \
  233. DEFINE_EVENT(dax_writeback_range_class, name, \
  234. TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),\
  235. TP_ARGS(inode, start_index, end_index))
  236. DEFINE_WRITEBACK_RANGE_EVENT(dax_writeback_range);
  237. DEFINE_WRITEBACK_RANGE_EVENT(dax_writeback_range_done);
  238. TRACE_EVENT(dax_writeback_one,
  239. TP_PROTO(struct inode *inode, pgoff_t pgoff, pgoff_t pglen),
  240. TP_ARGS(inode, pgoff, pglen),
  241. TP_STRUCT__entry(
  242. __field(unsigned long, ino)
  243. __field(pgoff_t, pgoff)
  244. __field(pgoff_t, pglen)
  245. __field(dev_t, dev)
  246. ),
  247. TP_fast_assign(
  248. __entry->dev = inode->i_sb->s_dev;
  249. __entry->ino = inode->i_ino;
  250. __entry->pgoff = pgoff;
  251. __entry->pglen = pglen;
  252. ),
  253. TP_printk("dev %d:%d ino %#lx pgoff %#lx pglen %#lx",
  254. MAJOR(__entry->dev),
  255. MINOR(__entry->dev),
  256. __entry->ino,
  257. __entry->pgoff,
  258. __entry->pglen
  259. )
  260. )
  261. #endif /* _TRACE_FS_DAX_H */
  262. /* This part must be outside protection */
  263. #include <trace/define_trace.h>