/*
 * ordered-events.c - queue perf events in timestamp order and flush them
 * to the session in that order.
 */
  1. #include <linux/list.h>
  2. #include <linux/compiler.h>
  3. #include <linux/string.h>
  4. #include "ordered-events.h"
  5. #include "evlist.h"
  6. #include "session.h"
  7. #include "asm/bug.h"
  8. #include "debug.h"
/*
 * Debug printout helpers for this file: pr_N() emits at verbosity level @n
 * under the debug_ordered_events class; pr() is the common level-1 shorthand.
 */
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
  12. static void queue_event(struct ordered_events *oe, struct ordered_event *new)
  13. {
  14. struct ordered_event *last = oe->last;
  15. u64 timestamp = new->timestamp;
  16. struct list_head *p;
  17. ++oe->nr_events;
  18. oe->last = new;
  19. pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);
  20. if (!last) {
  21. list_add(&new->list, &oe->events);
  22. oe->max_timestamp = timestamp;
  23. return;
  24. }
  25. /*
  26. * last event might point to some random place in the list as it's
  27. * the last queued event. We expect that the new event is close to
  28. * this.
  29. */
  30. if (last->timestamp <= timestamp) {
  31. while (last->timestamp <= timestamp) {
  32. p = last->list.next;
  33. if (p == &oe->events) {
  34. list_add_tail(&new->list, &oe->events);
  35. oe->max_timestamp = timestamp;
  36. return;
  37. }
  38. last = list_entry(p, struct ordered_event, list);
  39. }
  40. list_add_tail(&new->list, &last->list);
  41. } else {
  42. while (last->timestamp > timestamp) {
  43. p = last->list.prev;
  44. if (p == &oe->events) {
  45. list_add(&new->list, &oe->events);
  46. return;
  47. }
  48. last = list_entry(p, struct ordered_event, list);
  49. }
  50. list_add(&new->list, &last->list);
  51. }
  52. }
  53. static union perf_event *__dup_event(struct ordered_events *oe,
  54. union perf_event *event)
  55. {
  56. union perf_event *new_event = NULL;
  57. if (oe->cur_alloc_size < oe->max_alloc_size) {
  58. new_event = memdup(event, event->header.size);
  59. if (new_event)
  60. oe->cur_alloc_size += event->header.size;
  61. }
  62. return new_event;
  63. }
  64. static union perf_event *dup_event(struct ordered_events *oe,
  65. union perf_event *event)
  66. {
  67. return oe->copy_on_queue ? __dup_event(oe, event) : event;
  68. }
  69. static void free_dup_event(struct ordered_events *oe, union perf_event *event)
  70. {
  71. if (oe->copy_on_queue) {
  72. oe->cur_alloc_size -= event->header.size;
  73. free(event);
  74. }
  75. }
  76. #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
  77. static struct ordered_event *alloc_event(struct ordered_events *oe,
  78. union perf_event *event)
  79. {
  80. struct list_head *cache = &oe->cache;
  81. struct ordered_event *new = NULL;
  82. union perf_event *new_event;
  83. new_event = dup_event(oe, event);
  84. if (!new_event)
  85. return NULL;
  86. if (!list_empty(cache)) {
  87. new = list_entry(cache->next, struct ordered_event, list);
  88. list_del(&new->list);
  89. } else if (oe->buffer) {
  90. new = oe->buffer + oe->buffer_idx;
  91. if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
  92. oe->buffer = NULL;
  93. } else if (oe->cur_alloc_size < oe->max_alloc_size) {
  94. size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
  95. oe->buffer = malloc(size);
  96. if (!oe->buffer) {
  97. free_dup_event(oe, new_event);
  98. return NULL;
  99. }
  100. pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
  101. oe->cur_alloc_size, size, oe->max_alloc_size);
  102. oe->cur_alloc_size += size;
  103. list_add(&oe->buffer->list, &oe->to_free);
  104. /* First entry is abused to maintain the to_free list. */
  105. oe->buffer_idx = 2;
  106. new = oe->buffer + 1;
  107. } else {
  108. pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
  109. }
  110. new->event = new_event;
  111. return new;
  112. }
  113. struct ordered_event *
  114. ordered_events__new(struct ordered_events *oe, u64 timestamp,
  115. union perf_event *event)
  116. {
  117. struct ordered_event *new;
  118. new = alloc_event(oe, event);
  119. if (new) {
  120. new->timestamp = timestamp;
  121. queue_event(oe, new);
  122. }
  123. return new;
  124. }
  125. void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
  126. {
  127. list_move(&event->list, &oe->cache);
  128. oe->nr_events--;
  129. free_dup_event(oe, event->event);
  130. }
  131. static int __ordered_events__flush(struct perf_session *s,
  132. struct perf_tool *tool)
  133. {
  134. struct ordered_events *oe = &s->ordered_events;
  135. struct list_head *head = &oe->events;
  136. struct ordered_event *tmp, *iter;
  137. struct perf_sample sample;
  138. u64 limit = oe->next_flush;
  139. u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
  140. bool show_progress = limit == ULLONG_MAX;
  141. struct ui_progress prog;
  142. int ret;
  143. if (!tool->ordered_events || !limit)
  144. return 0;
  145. if (show_progress)
  146. ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");
  147. list_for_each_entry_safe(iter, tmp, head, list) {
  148. if (session_done())
  149. return 0;
  150. if (iter->timestamp > limit)
  151. break;
  152. ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
  153. if (ret)
  154. pr_err("Can't parse sample, err = %d\n", ret);
  155. else {
  156. ret = perf_session__deliver_event(s, iter->event, &sample, tool,
  157. iter->file_offset);
  158. if (ret)
  159. return ret;
  160. }
  161. ordered_events__delete(oe, iter);
  162. oe->last_flush = iter->timestamp;
  163. if (show_progress)
  164. ui_progress__update(&prog, 1);
  165. }
  166. if (list_empty(head))
  167. oe->last = NULL;
  168. else if (last_ts <= limit)
  169. oe->last = list_entry(head->prev, struct ordered_event, list);
  170. return 0;
  171. }
  172. int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
  173. enum oe_flush how)
  174. {
  175. struct ordered_events *oe = &s->ordered_events;
  176. static const char * const str[] = {
  177. "NONE",
  178. "FINAL",
  179. "ROUND",
  180. "HALF ",
  181. };
  182. int err;
  183. switch (how) {
  184. case OE_FLUSH__FINAL:
  185. oe->next_flush = ULLONG_MAX;
  186. break;
  187. case OE_FLUSH__HALF:
  188. {
  189. struct ordered_event *first, *last;
  190. struct list_head *head = &oe->events;
  191. first = list_entry(head->next, struct ordered_event, list);
  192. last = oe->last;
  193. /* Warn if we are called before any event got allocated. */
  194. if (WARN_ONCE(!last || list_empty(head), "empty queue"))
  195. return 0;
  196. oe->next_flush = first->timestamp;
  197. oe->next_flush += (last->timestamp - first->timestamp) / 2;
  198. break;
  199. }
  200. case OE_FLUSH__ROUND:
  201. case OE_FLUSH__NONE:
  202. default:
  203. break;
  204. };
  205. pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
  206. str[how], oe->nr_events);
  207. pr_oe_time(oe->max_timestamp, "max_timestamp\n");
  208. err = __ordered_events__flush(s, tool);
  209. if (!err) {
  210. if (how == OE_FLUSH__ROUND)
  211. oe->next_flush = oe->max_timestamp;
  212. oe->last_flush_type = how;
  213. }
  214. pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
  215. str[how], oe->nr_events);
  216. pr_oe_time(oe->last_flush, "last_flush\n");
  217. return err;
  218. }
  219. void ordered_events__init(struct ordered_events *oe)
  220. {
  221. INIT_LIST_HEAD(&oe->events);
  222. INIT_LIST_HEAD(&oe->cache);
  223. INIT_LIST_HEAD(&oe->to_free);
  224. oe->max_alloc_size = (u64) -1;
  225. oe->cur_alloc_size = 0;
  226. }
  227. void ordered_events__free(struct ordered_events *oe)
  228. {
  229. while (!list_empty(&oe->to_free)) {
  230. struct ordered_event *event;
  231. event = list_entry(oe->to_free.next, struct ordered_event, list);
  232. list_del(&event->list);
  233. free_dup_event(oe, event->event);
  234. free(event);
  235. }
  236. }