/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;
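
/*
 * Permission check for perf use of a trace event.  Called only on the
 * perf_event_open() path (so the owner's credentials may be checked),
 * before any registration work is done.  Returns 0 if the event may be
 * used, -EPERM/-EINVAL otherwise.
 */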
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * The parent event was already checked and allowed at creation,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because the code below is called only via the perf_event_open
	 * syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while tracing
		 * the page fault handler and its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Disable user stack dumps for the same reason as user
		 * space callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
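
/*
 * First-reference setup: on the first perf user of this trace event,
 * allocate the per-cpu hlists of attached perf events and, on the first
 * perf user of *any* trace event, the shared per-context raw sample
 * buffers.  The fail path unwinds both allocations.
 */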
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
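
/*
 * Drop one reference; on the last one, unregister the event's perf
 * callback, wait for in-flight probes to finish, then free the per-cpu
 * lists and (with the last trace event overall) the shared buffers.
 */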
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}
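
/* Thin wrappers: forward open/close of a perf event to the class ->reg() op. */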
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
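
/*
 * Full init path for one perf event: permission check, first-reference
 * registration, then the per-event open.  A failed open unwinds the
 * registration so the refcounts stay balanced.
 */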
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}
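
/*
 * Entry point from the perf core: look up the trace event whose id
 * matches attr.config under event_mutex, pin its module, and initialize
 * the event.  The module reference is dropped here on failure and
 * otherwise held until perf_trace_event_unreg().
 */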
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
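
/* Counterpart of perf_trace_init(): close and unregister under event_mutex. */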
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}
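
/*
 * Scheduling hooks from the perf core: link/unlink the event on this
 * CPU's hlist (walked by the probe callbacks under RCU) and let the
 * class ->reg() op do any per-event start/stop work.
 */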
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
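
/*
 * Grab a per-context raw sample buffer and initialize its common trace
 * entry header.  On success, *rctxp holds the recursion context that
 * must be handed back on submit.  A typical caller (sketch; it mirrors
 * perf_ftrace_function_call() below) looks like:
 *
 *	entry = perf_trace_buf_prepare(size, event_type, NULL, &rctx);
 *	if (!entry)
 *		return;
 *	... fill in the event-specific fields of *entry ...
 *	perf_trace_buf_submit(entry, size, rctx, addr, count,
 *			      regs, head, NULL);
 */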
void *perf_trace_buf_prepare(int size, unsigned short type,
			     struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
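
/*
 * Per-function probe: fires on every traced function entry for this
 * event's ftrace_ops, and bails out cheaply when no perf event is
 * attached on the current CPU.
 */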
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}
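
/*
 * Attach/detach the per-event ftrace_ops.  FTRACE_OPS_FL_CONTROL makes
 * the ops eligible for the cheap per-cpu enable/disable used by the
 * sched-in/sched-out paths below.
 */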
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}
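
/*
 * ->reg() callback for the function trace event: maps the generic
 * trace_reg requests onto the ftrace_ops helpers above.  OPEN/CLOSE
 * (un)register the ops; ADD/DEL toggle it locally on the current CPU.
 */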
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */