// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

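/*
 * Insert @new into the time-sorted 'events' list. The walk starts at
 * the most recently queued event and moves forward or backward from
 * there, so queueing is cheap for an almost-sorted input stream and
 * degrades to a linear scan only for heavily unordered input.
 */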
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * 'last' may point anywhere in the list, since it is simply the
	 * most recently queued event. We expect the new event to land
	 * close to it.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

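/*
 * With 'copy_on_queue' set, the perf_event payload is duplicated at
 * queue time (the caller may reuse its buffer before delivery) and the
 * copy is charged against 'max_alloc_size'. Once the budget is spent,
 * __dup_event() fails and the caller is expected to flush and retry.
 */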
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (event) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (oe->copy_on_queue)
		__free_dup_event(oe, event);
}

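/* Number of ordered_event slots carved out of each ~64KB allocation buffer. */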
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;
	size_t size;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	/*
	 * We maintain the following scheme of buffers for ordered
	 * event allocation:
	 *
	 *   to_free list -> buffer1 (64K)
	 *                   buffer2 (64K)
	 *                   ...
	 *
	 * Each buffer keeps an array of ordered event objects:
	 *   buffer -> event[0]
	 *             event[1]
	 *             ...
	 *
	 * Each allocated ordered event is linked to one of the
	 * following lists:
	 *   - time ordered list 'events'
	 *   - list of currently removed events 'cache'
	 *
	 * Allocation of an ordered event uses the following order
	 * to get the memory:
	 *   - use a recently removed object from the 'cache' list
	 *   - use an available object in the current allocation buffer
	 *   - allocate a new buffer if the current buffer is full
	 *
	 * Removal of an ordered event object moves it from 'events' to
	 * the 'cache' list.
	 */
	size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del_init(&new->list);
	} else if (oe->buffer) {
		new = &oe->buffer->event[oe->buffer_idx];
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		oe->buffer_idx = 1;
		new = &oe->buffer->event[0];
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		return NULL;
	}

	new->event = new_event;
	return new;
}

static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

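/*
 * Take a delivered event off the 'events' list and park its
 * ordered_event object on the 'cache' list for reuse; the duplicated
 * payload, if any, is freed here.
 */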
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
	event->event = NULL;
}

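/*
 * Queue one event for time-ordered delivery. Events without a usable
 * timestamp (0 or ~0ULL) are rejected with -ETIME. Timestamps older
 * than the last flush are still queued, but counted in
 * 'nr_unordered_events'. On allocation failure, half of the queue is
 * flushed to reclaim memory and the allocation is retried once.
 */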
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  u64 timestamp, u64 file_offset)
{
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp, "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}

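/*
 * Deliver every queued event with a timestamp up to 'next_flush', in
 * time order, through the 'deliver' callback, recycling each delivered
 * event afterwards.
 */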
static int do_flush(struct ordered_events *oe, bool show_progress)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}

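/*
 * Pick the flush limit for the requested mode, then run do_flush():
 * FINAL and TOP flush everything, HALF flushes up to the midpoint
 * between the oldest and newest queued timestamps, TIME flushes up to
 * the given timestamp, and ROUND keeps the limit recorded by the
 * previous round, advancing it to 'max_timestamp' on success.
 */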
static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
				   u64 timestamp)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
		"TOP ",
		"TIME ",
	};
	int err;
	bool show_progress = false;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		show_progress = true;
		__fallthrough;
	case OE_FLUSH__TOP:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__TIME:
		oe->next_flush = timestamp;
		break;

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = do_flush(oe, show_progress);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	return __ordered_events__flush(oe, how, 0);
}

int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
	return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}

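/* Return the timestamp of the oldest queued event, or 0 if the queue is empty. */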
u64 ordered_events__first_time(struct ordered_events *oe)
{
	struct ordered_event *event;

	if (list_empty(&oe->events))
		return 0;

	event = list_first_entry(&oe->events, struct ordered_event, list);
	return event->timestamp;
}

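/*
 * Set up an ordered_events instance with empty lists, an effectively
 * unlimited allocation budget and the given delivery callback.
 */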
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
			  void *data)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver = deliver;
	oe->data = data;
}

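/*
 * Free one allocation buffer, together with any duplicated event
 * payloads still held in its first 'max' slots.
 */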
static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
			    unsigned int max, struct ordered_events *oe)
{
	if (oe->copy_on_queue) {
		unsigned int i;

		for (i = 0; i < max; i++)
			__free_dup_event(oe, buffer->event[i].event);
	}

	free(buffer);
}

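/*
 * Release all allocation buffers. Slots already delivered have had
 * their payloads freed (and set to NULL) by ordered_events__delete().
 */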
void ordered_events__free(struct ordered_events *oe)
{
	struct ordered_events_buffer *buffer, *tmp;

	if (list_empty(&oe->to_free))
		return;

	/*
	 * The current buffer might not have all its events allocated
	 * yet, so free only the allocated ones ...
	 */
	if (oe->buffer) {
		list_del_init(&oe->buffer->list);
		ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
	}

	/* ... and continue with the rest. */
	list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
		list_del_init(&buffer->list);
		ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
	}
}

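/*
 * Reset the instance to a freshly initialized state, keeping the
 * registered deliver callback and opaque data pointer.
 */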
void ordered_events__reinit(struct ordered_events *oe)
{
	ordered_events__deliver_t old_deliver = oe->deliver;
	/* Save the data pointer too: after the memset() below, oe->data reads as NULL. */
	void *old_data = oe->data;

	ordered_events__free(oe);
	memset(oe, '\0', sizeof(*oe));
	ordered_events__init(oe, old_deliver, old_data);
}