// SPDX-License-Identifier: GPL-2.0
#include <perf/threadmap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/threadmap.h>
#include <string.h>
#include <asm/bug.h>
#include <stdio.h>
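
/*
 * Zero the map entries in the range [start, nr) and reset err_thread
 * (used by callers to remember a thread that failed on event open)
 * back to -1. Called after a grow-only realloc so the newly added
 * slots start from a known state.
 */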
static void perf_thread_map__reset(struct perf_thread_map *map, int start, int nr)
{
	size_t size = (nr - start) * sizeof(map->map[0]);

	memset(&map->map[start], 0, size);
	map->err_thread = -1;
}
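
/*
 * Allocate a map with room for @nr entries, or grow an existing @map.
 * Passing NULL allocates a fresh map. On failure NULL is returned and
 * the original allocation is left untouched, so a caller that kept its
 * own pointer can still recover (or put) the old map.
 */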
struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, int nr)
{
	size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
	int start = map ? map->nr : 0;

	map = realloc(map, size);
	/*
	 * We only realloc to add more items, so reset just the new ones.
	 */
	if (map)
		perf_thread_map__reset(map, start, nr);

	return map;
}

#define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)
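
/*
 * Illustrative sketch of growing a map by one entry (hypothetical
 * caller code; "threads" and "pid" are assumed to exist). The helper
 * zeroes the new slot but does not update nr, so the caller bumps it
 * after a successful grow; on failure "threads" is still valid:
 *
 *	struct perf_thread_map *tmp;
 *
 *	tmp = perf_thread_map__realloc(threads, threads->nr + 1);
 *	if (tmp == NULL)
 *		return -ENOMEM;
 *	threads = tmp;
 *	perf_thread_map__set_pid(threads, threads->nr++, pid);
 */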

void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid)
{
	map->map[thread].pid = pid;
}
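
/*
 * Return the cached comm (thread name) for @thread. The string is
 * owned by the map and freed in perf_thread_map__delete(), so callers
 * must not free it themselves.
 */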
char *perf_thread_map__comm(struct perf_thread_map *map, int thread)
{
	return map->map[thread].comm;
}
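
/*
 * Create a map holding a single entry with pid -1. Such a dummy map is
 * used when events are not tied to specific threads, e.g. for per-CPU
 * or system-wide monitoring. The caller owns the initial reference.
 */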
struct perf_thread_map *perf_thread_map__new_dummy(void)
{
	struct perf_thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		perf_thread_map__set_pid(threads, 0, -1);
		threads->nr = 1;
		refcount_set(&threads->refcnt, 1);
	}
	return threads;
}
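
/*
 * Free the per-thread comm strings and the map itself. Reached via
 * perf_thread_map__put() once the last reference is dropped; the
 * WARN_ONCE() fires if the map is freed while references remain.
 */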
static void perf_thread_map__delete(struct perf_thread_map *threads)
{
	if (threads) {
		int i;

		WARN_ONCE(refcount_read(&threads->refcnt) != 0,
			  "thread map refcnt unbalanced\n");
		for (i = 0; i < threads->nr; i++)
			free(perf_thread_map__comm(threads, i));
		free(threads);
	}
}
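
/*
 * Reference counting: get() takes an extra reference and returns the
 * map so it can be used in assignments; put() drops one reference and
 * frees the map when the count hits zero. Both accept NULL as a no-op.
 *
 * Illustrative lifecycle sketch (hypothetical caller code; "owner" is
 * an assumed second user of the map):
 *
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *
 *	if (threads == NULL)
 *		return -ENOMEM;
 *	owner->threads = perf_thread_map__get(threads);
 *	...
 *	perf_thread_map__put(owner->threads);
 *	perf_thread_map__put(threads);
 */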
struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map)
{
	if (map)
		refcount_inc(&map->refcnt);
	return map;
}

void perf_thread_map__put(struct perf_thread_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		perf_thread_map__delete(map);
}
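
/*
 * A NULL map is treated as having a single (dummy) thread, so callers
 * can iterate over perf_thread_map__nr() without special-casing NULL.
 */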
int perf_thread_map__nr(struct perf_thread_map *threads)
{
	return threads ? threads->nr : 1;
}

pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread)
{
	return map->map[thread].pid;
}