ordered-events.c

// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
        eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
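
/*
 * Link @new into the time-ordered 'events' list. The search starts from the
 * most recently queued event (oe->last) and walks forward or backward from
 * there, which is cheap when successive events carry nearby timestamps.
 */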
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
        struct ordered_event *last = oe->last;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        ++oe->nr_events;
        oe->last = new;

        pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

        if (!last) {
                list_add(&new->list, &oe->events);
                oe->max_timestamp = timestamp;
                return;
        }

        /*
         * last event might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * this.
         */
        if (last->timestamp <= timestamp) {
                while (last->timestamp <= timestamp) {
                        p = last->list.next;
                        if (p == &oe->events) {
                                list_add_tail(&new->list, &oe->events);
                                oe->max_timestamp = timestamp;
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add_tail(&new->list, &last->list);
        } else {
                while (last->timestamp > timestamp) {
                        p = last->list.prev;
                        if (p == &oe->events) {
                                list_add(&new->list, &oe->events);
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add(&new->list, &last->list);
        }
}
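
/*
 * Copy-on-queue helpers: when oe->copy_on_queue is set, queued events are
 * duplicated so the caller's buffer can be reused, and the copies are
 * accounted against max_alloc_size.
 */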
static union perf_event *__dup_event(struct ordered_events *oe,
                                     union perf_event *event)
{
        union perf_event *new_event = NULL;

        if (oe->cur_alloc_size < oe->max_alloc_size) {
                new_event = memdup(event, event->header.size);
                if (new_event)
                        oe->cur_alloc_size += event->header.size;
        }

        return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
                                   union perf_event *event)
{
        return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (event) {
                oe->cur_alloc_size -= event->header.size;
                free(event);
        }
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (oe->copy_on_queue)
                __free_dup_event(oe, event);
}
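
/* Each allocation buffer holds as many ordered_event slots as fit in 64KB. */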
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))

static struct ordered_event *alloc_event(struct ordered_events *oe,
                                         union perf_event *event)
{
        struct list_head *cache = &oe->cache;
        struct ordered_event *new = NULL;
        union perf_event *new_event;
        size_t size;

        new_event = dup_event(oe, event);
        if (!new_event)
                return NULL;

        /*
         * We maintain the following scheme of buffers for ordered
         * event allocation:
         *
         *   to_free list -> buffer1 (64K)
         *                   buffer2 (64K)
         *                   ...
         *
         * Each buffer keeps an array of ordered events objects:
         *    buffer -> event[0]
         *              event[1]
         *              ...
         *
         * Each allocated ordered event is linked to one of
         * following lists:
         *   - time ordered list 'events'
         *   - list of currently removed events 'cache'
         *
         * Allocation of the ordered event uses the following order
         * to get the memory:
         *   - use recently removed object from 'cache' list
         *   - use available object in current allocation buffer
         *   - allocate new buffer if the current buffer is full
         *
         * Removal of ordered event object moves it from events to
         * the cache list.
         */
        size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

        if (!list_empty(cache)) {
                new = list_entry(cache->next, struct ordered_event, list);
                list_del(&new->list);
        } else if (oe->buffer) {
                new = &oe->buffer->event[oe->buffer_idx];
                if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
                        oe->buffer = NULL;
        } else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
                oe->buffer = malloc(size);
                if (!oe->buffer) {
                        free_dup_event(oe, new_event);
                        return NULL;
                }

                pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
                   oe->cur_alloc_size, size, oe->max_alloc_size);

                oe->cur_alloc_size += size;
                list_add(&oe->buffer->list, &oe->to_free);

                oe->buffer_idx = 1;
                new = &oe->buffer->event[0];
        } else {
                pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
                /* Drop the copy made above so it is not leaked. */
                free_dup_event(oe, new_event);
                return NULL;
        }

        new->event = new_event;
        return new;
}
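
/* Wrap @event in an ordered_event, stamp it with @timestamp and queue it. */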
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
                          union perf_event *event)
{
        struct ordered_event *new;

        new = alloc_event(oe, event);
        if (new) {
                new->timestamp = timestamp;
                queue_event(oe, new);
        }

        return new;
}
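
/*
 * Return a delivered event to the 'cache' list for reuse and drop its copied
 * data (when copy_on_queue is set).
 */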
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
        list_move(&event->list, &oe->cache);
        oe->nr_events--;
        free_dup_event(oe, event->event);
        event->event = NULL;
}
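
/*
 * Queue @event for time-ordered delivery. Events with no (or an invalid)
 * timestamp are rejected; events older than the last flush are still queued
 * but counted as unordered. If allocation fails, half of the queue is
 * flushed to make room and the allocation is retried once.
 */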
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
                          u64 timestamp, u64 file_offset)
{
        struct ordered_event *oevent;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < oe->last_flush) {
                pr_oe_time(timestamp,      "out of order event\n");
                pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
                           oe->last_flush_type);

                oe->nr_unordered_events++;
        }

        oevent = ordered_events__new_event(oe, timestamp, event);
        if (!oevent) {
                ordered_events__flush(oe, OE_FLUSH__HALF);
                oevent = ordered_events__new_event(oe, timestamp, event);
        }

        if (!oevent)
                return -ENOMEM;

        oevent->file_offset = file_offset;
        return 0;
}
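
/*
 * Deliver every queued event with a timestamp up to oe->next_flush, moving
 * delivered events back to the cache. A progress bar is shown only for the
 * final (unbounded) flush.
 */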
static int __ordered_events__flush(struct ordered_events *oe)
{
        struct list_head *head = &oe->events;
        struct ordered_event *tmp, *iter;
        u64 limit = oe->next_flush;
        u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
        bool show_progress = limit == ULLONG_MAX;
        struct ui_progress prog;
        int ret;

        if (!limit)
                return 0;

        if (show_progress)
                ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (session_done())
                        return 0;

                if (iter->timestamp > limit)
                        break;

                ret = oe->deliver(oe, iter);
                if (ret)
                        return ret;

                ordered_events__delete(oe, iter);
                oe->last_flush = iter->timestamp;

                if (show_progress)
                        ui_progress__update(&prog, 1);
        }

        if (list_empty(head))
                oe->last = NULL;
        else if (last_ts <= limit)
                oe->last = list_entry(head->prev, struct ordered_event, list);

        if (show_progress)
                ui_progress__finish();

        return 0;
}
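
/*
 * Flush the queue according to @how: FINAL drains everything, HALF flushes
 * up to the midpoint between the oldest and newest queued timestamps, and
 * ROUND flushes up to the limit left by the previous round, then advances
 * that limit to the current max_timestamp.
 */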
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
        static const char * const str[] = {
                "NONE",
                "FINAL",
                "ROUND",
                "HALF ",
        };
        int err;

        if (oe->nr_events == 0)
                return 0;

        switch (how) {
        case OE_FLUSH__FINAL:
                oe->next_flush = ULLONG_MAX;
                break;

        case OE_FLUSH__HALF:
        {
                struct ordered_event *first, *last;
                struct list_head *head = &oe->events;

                first = list_entry(head->next, struct ordered_event, list);
                last = oe->last;

                /* Warn if we are called before any event got allocated. */
                if (WARN_ONCE(!last || list_empty(head), "empty queue"))
                        return 0;

                oe->next_flush  = first->timestamp;
                oe->next_flush += (last->timestamp - first->timestamp) / 2;
                break;
        }

        case OE_FLUSH__ROUND:
        case OE_FLUSH__NONE:
        default:
                break;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->max_timestamp, "max_timestamp\n");

        err = __ordered_events__flush(oe);

        if (!err) {
                if (how == OE_FLUSH__ROUND)
                        oe->next_flush = oe->max_timestamp;

                oe->last_flush_type = how;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->last_flush, "last_flush\n");

        return err;
}
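
/*
 * Prepare an empty queue; @deliver is invoked for each event when a flush
 * drains it. A typical lifecycle is ordered_events__init(), repeated
 * ordered_events__queue() calls interleaved with ordered_events__flush(),
 * and finally ordered_events__free().
 */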
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
        INIT_LIST_HEAD(&oe->events);
        INIT_LIST_HEAD(&oe->cache);
        INIT_LIST_HEAD(&oe->to_free);
        oe->max_alloc_size = (u64) -1;
        oe->cur_alloc_size = 0;
        oe->deliver        = deliver;
}
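
/*
 * Free one allocation buffer; with copy_on_queue, also free the duplicated
 * events held in its first @max slots.
 */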
static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
                            unsigned int max, struct ordered_events *oe)
{
        if (oe->copy_on_queue) {
                unsigned int i;

                for (i = 0; i < max; i++)
                        __free_dup_event(oe, buffer->event[i].event);
        }

        free(buffer);
}
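
/* Release all allocation buffers and any event copies still attached to them. */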
void ordered_events__free(struct ordered_events *oe)
{
        struct ordered_events_buffer *buffer, *tmp;

        if (list_empty(&oe->to_free))
                return;

        /*
         * Current buffer might not have all the events allocated
         * yet, we need to free only allocated ones ...
         */
        if (oe->buffer) {
                list_del(&oe->buffer->list);
                ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
        }

        /* ... and continue with the rest */
        list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
                list_del(&buffer->list);
                ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
        }
}
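
/* Drop all queued state but keep the deliver callback for further use. */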
void ordered_events__reinit(struct ordered_events *oe)
{
        ordered_events__deliver_t old_deliver = oe->deliver;

        ordered_events__free(oe);
        memset(oe, '\0', sizeof(*oe));
        ordered_events__init(oe, old_deliver);
}