/* kvm.h */

#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

#define kvm_trace_exit_reason \
        ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
        ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
        ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
        ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR), \
        ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \
        ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH)
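
/*
 * Tracepoint for a vcpu exit to userspace: the exit reason is printed
 * symbolically via kvm_trace_exit_reason on success; a negative errno is
 * reported as "restart" (-EINTR) or "error" together with its value.
 */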
TRACE_EVENT(kvm_userspace_exit,
        TP_PROTO(__u32 reason, int errno),
        TP_ARGS(reason, errno),

        TP_STRUCT__entry(
                __field(__u32, reason)
                __field(int, errno)
        ),

        TP_fast_assign(
                __entry->reason = reason;
                __entry->errno = errno;
        ),

        TP_printk("reason %s (%d)",
                  __entry->errno < 0 ?
                  (__entry->errno == -EINTR ? "restart" : "error") :
                  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
                  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);

#if defined(CONFIG_HAVE_KVM_IRQFD)
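/*
 * Tracepoint for setting an interrupt line (GSI) through the in-kernel
 * interrupt routing: records the GSI, the level and the source id.
 */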
TRACE_EVENT(kvm_set_irq,
        TP_PROTO(unsigned int gsi, int level, int irq_source_id),
        TP_ARGS(gsi, level, irq_source_id),

        TP_STRUCT__entry(
                __field(unsigned int, gsi)
                __field(int, level)
                __field(int, irq_source_id)
        ),

        TP_fast_assign(
                __entry->gsi = gsi;
                __entry->level = level;
                __entry->irq_source_id = irq_source_id;
        ),

        TP_printk("gsi %u level %d source %d",
                  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode \
        {0x0, "Fixed"}, \
        {0x1, "LowPrio"}, \
        {0x2, "SMI"}, \
        {0x3, "Res3"}, \
        {0x4, "NMI"}, \
        {0x5, "INIT"}, \
        {0x6, "SIPI"}, \
        {0x7, "ExtINT"}
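
/*
 * The two IOAPIC tracepoints below decode the 64-bit redirection table
 * entry 'e': destination id in bits 63:56, vector in bits 7:0, delivery
 * mode in bits 10:8 (printed via kvm_deliver_mode), destination mode in
 * bit 11, trigger mode in bit 15 and the mask bit in bit 16.
 */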
TRACE_EVENT(kvm_ioapic_set_irq,
        TP_PROTO(__u64 e, int pin, bool coalesced),
        TP_ARGS(e, pin, coalesced),

        TP_STRUCT__entry(
                __field(__u64, e)
                __field(int, pin)
                __field(bool, coalesced)
        ),

        TP_fast_assign(
                __entry->e = e;
                __entry->pin = pin;
                __entry->coalesced = coalesced;
        ),

        TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
                  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
                  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
                  (__entry->e & (1<<11)) ? "logical" : "physical",
                  (__entry->e & (1<<15)) ? "level" : "edge",
                  (__entry->e & (1<<16)) ? "|masked" : "",
                  __entry->coalesced ? " (coalesced)" : "")
);

TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
        TP_PROTO(__u64 e),
        TP_ARGS(e),

        TP_STRUCT__entry(
                __field(__u64, e)
        ),

        TP_fast_assign(
                __entry->e = e;
        ),

        TP_printk("dst %x vec=%u (%s|%s|%s%s)",
                  (u8)(__entry->e >> 56), (u8)__entry->e,
                  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
                  (__entry->e & (1<<11)) ? "logical" : "physical",
                  (__entry->e & (1<<15)) ? "level" : "edge",
                  (__entry->e & (1<<16)) ? "|masked" : "")
);
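
/*
 * kvm_msi_set_irq decodes the MSI address/data pair instead: destination
 * id from address bits 19:12, vector from data bits 7:0, delivery mode
 * from data bits 10:8, destination mode from address bit 2, trigger mode
 * from data bit 15 and the redirection hint from address bit 3.
 */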
TRACE_EVENT(kvm_msi_set_irq,
        TP_PROTO(__u64 address, __u64 data),
        TP_ARGS(address, data),

        TP_STRUCT__entry(
                __field(__u64, address)
                __field(__u64, data)
        ),

        TP_fast_assign(
                __entry->address = address;
                __entry->data = data;
        ),

        TP_printk("dst %u vec %x (%s|%s|%s%s)",
                  (u8)(__entry->address >> 12), (u8)__entry->data,
                  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
                  (__entry->address & (1<<2)) ? "logical" : "physical",
                  (__entry->data & (1<<15)) ? "level" : "edge",
                  (__entry->address & (1<<3)) ? "|rh" : "")
);

#define kvm_irqchips \
        {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
        {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
        {KVM_IRQCHIP_IOAPIC, "IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */

#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_ack_irq,
        TP_PROTO(unsigned int irqchip, unsigned int pin),
        TP_ARGS(irqchip, pin),

        TP_STRUCT__entry(
                __field(unsigned int, irqchip)
                __field(unsigned int, pin)
        ),

        TP_fast_assign(
                __entry->irqchip = irqchip;
                __entry->pin = pin;
        ),

#ifdef kvm_irqchips
        TP_printk("irqchip %s pin %u",
                  __print_symbolic(__entry->irqchip, kvm_irqchips),
                  __entry->pin)
#else
        TP_printk("irqchip %d pin %u", __entry->irqchip, __entry->pin)
#endif
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
        { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
        { KVM_TRACE_MMIO_READ, "read" }, \
        { KVM_TRACE_MMIO_WRITE, "write" }
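
/*
 * Tracepoint for MMIO accesses handled by KVM.  "unsatisfied-read" marks a
 * read that is logged before its value is available (for example when the
 * access has to be completed in userspace).
 */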
TRACE_EVENT(kvm_mmio,
        TP_PROTO(int type, int len, u64 gpa, u64 val),
        TP_ARGS(type, len, gpa, val),

        TP_STRUCT__entry(
                __field(u32, type)
                __field(u32, len)
                __field(u64, gpa)
                __field(u64, val)
        ),

        TP_fast_assign(
                __entry->type = type;
                __entry->len = len;
                __entry->gpa = gpa;
                __entry->val = val;
        ),

        TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
                  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
                  __entry->len, __entry->gpa, __entry->val)
);

#define kvm_fpu_load_symbol \
        {0, "unload"}, \
        {1, "load"}

TRACE_EVENT(kvm_fpu,
        TP_PROTO(int load),
        TP_ARGS(load),

        TP_STRUCT__entry(
                __field(u32, load)
        ),

        TP_fast_assign(
                __entry->load = load;
        ),

        TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
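
/*
 * Tracepoint for page aging: the host virtual address is reconstructed
 * from the gfn and the memslot, and the result reports whether the page
 * was recently referenced ("YOUNG") or not ("OLD").
 */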
TRACE_EVENT(kvm_age_page,
        TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
        TP_ARGS(gfn, level, slot, ref),

        TP_STRUCT__entry(
                __field(u64, hva)
                __field(u64, gfn)
                __field(u8, level)
                __field(u8, referenced)
        ),

        TP_fast_assign(
                __entry->gfn = gfn;
                __entry->level = level;
                __entry->hva = ((gfn - slot->base_gfn) << PAGE_SHIFT) + slot->userspace_addr;
                __entry->referenced = ref;
        ),

        TP_printk("hva %llx gfn %llx level %u %s",
                  __entry->hva, __entry->gfn, __entry->level,
                  __entry->referenced ? "YOUNG" : "OLD")
);

#ifdef CONFIG_KVM_ASYNC_PF
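/*
 * Tracepoints for the asynchronous page fault path: the guest is told that
 * a page is "not present" together with a token, the fault is completed in
 * the background, and a matching "ready" notification with the same token
 * is delivered later.
 */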
DECLARE_EVENT_CLASS(kvm_async_get_page_class,
        TP_PROTO(u64 gva, u64 gfn),
        TP_ARGS(gva, gfn),

        TP_STRUCT__entry(
                __field(__u64, gva)
                __field(u64, gfn)
        ),

        TP_fast_assign(
                __entry->gva = gva;
                __entry->gfn = gfn;
        ),

        TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
        TP_PROTO(u64 gva, u64 gfn),
        TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
        TP_PROTO(u64 gva, u64 gfn),
        TP_ARGS(gva, gfn)
);

DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
        TP_PROTO(u64 token, u64 gva),
        TP_ARGS(token, gva),

        TP_STRUCT__entry(
                __field(__u64, token)
                __field(__u64, gva)
        ),

        TP_fast_assign(
                __entry->token = token;
                __entry->gva = gva;
        ),

        TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
        TP_PROTO(u64 token, u64 gva),
        TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
        TP_PROTO(u64 token, u64 gva),
        TP_ARGS(token, gva)
);

TRACE_EVENT(kvm_async_pf_completed,
        TP_PROTO(unsigned long address, u64 gva),
        TP_ARGS(address, gva),

        TP_STRUCT__entry(
                __field(unsigned long, address)
                __field(u64, gva)
        ),

        TP_fast_assign(
                __entry->address = address;
                __entry->gva = gva;
        ),

        TP_printk("gva %#llx address %#lx", __entry->gva, __entry->address)
);
#endif

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
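
/*
 * Illustrative usage sketch (not part of this header; the variable names
 * below are assumptions for the example): exactly one KVM source file
 * defines CREATE_TRACE_POINTS before including this header so that the
 * tracepoint bodies get emitted, and call sites then invoke the generated
 * trace_*() helpers, e.g.:
 *
 *     #define CREATE_TRACE_POINTS
 *     #include <trace/events/kvm.h>
 *
 *     trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, gpa, val);
 *
 * The resulting events can be enabled at runtime under
 * /sys/kernel/debug/tracing/events/kvm/.
 */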