^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #include "tests.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #include <stdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include "cpumap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include "event.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include "util/synthetic-events.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <perf/cpumap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) struct machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) static int process_event_mask(struct perf_tool *tool __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) struct perf_sample *sample __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) struct machine *machine __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) struct perf_record_cpu_map *map_event = &event->cpu_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) struct perf_record_record_cpu_map *mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) struct perf_record_cpu_map_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) struct perf_cpu_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) data = &map_event->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) mask = (struct perf_record_record_cpu_map *)data->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) TEST_ASSERT_VAL("wrong nr", mask->nr == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) for (i = 0; i < 20; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) TEST_ASSERT_VAL("wrong cpu", test_bit(i, mask->mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) map = cpu_map__new_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) TEST_ASSERT_VAL("wrong nr", map->nr == 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) for (i = 0; i < 20; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) TEST_ASSERT_VAL("wrong cpu", map->map[i] == i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) perf_cpu_map__put(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) static int process_event_cpus(struct perf_tool *tool __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct perf_sample *sample __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) struct machine *machine __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct perf_record_cpu_map *map_event = &event->cpu_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) struct cpu_map_entries *cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) struct perf_record_cpu_map_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) struct perf_cpu_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) data = &map_event->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__CPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) cpus = (struct cpu_map_entries *)data->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) TEST_ASSERT_VAL("wrong nr", cpus->nr == 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) TEST_ASSERT_VAL("wrong cpu", cpus->cpu[0] == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) TEST_ASSERT_VAL("wrong cpu", cpus->cpu[1] == 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) map = cpu_map__new_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) TEST_ASSERT_VAL("wrong nr", map->nr == 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) TEST_ASSERT_VAL("wrong cpu", map->map[0] == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) TEST_ASSERT_VAL("wrong cpu", map->map[1] == 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) TEST_ASSERT_VAL("wrong refcnt", refcount_read(&map->refcnt) == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) perf_cpu_map__put(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) int test__cpu_map_synthesize(struct test *test __maybe_unused, int subtest __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct perf_cpu_map *cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) /* This one is better stores in mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) cpus = perf_cpu_map__new("0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) TEST_ASSERT_VAL("failed to synthesize map",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) !perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) perf_cpu_map__put(cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) /* This one is better stores in cpu values. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) cpus = perf_cpu_map__new("1,256");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) TEST_ASSERT_VAL("failed to synthesize map",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) !perf_event__synthesize_cpu_map(NULL, cpus, process_event_cpus, NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) perf_cpu_map__put(cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
/*
 * Parse @str into a cpu map, format it back with cpu_map__snprint() and
 * report whether the round-trip reproduces the input exactly.
 * Returns 1 on match, 0 on mismatch, -1 if the string cannot be parsed.
 */
static int cpu_map_print(const char *str)
{
	struct perf_cpu_map *map = perf_cpu_map__new(str);
	char buf[100];
	int ret;

	if (!map)
		return -1;

	cpu_map__snprint(map, buf, sizeof(buf));
	ret = !strcmp(buf, str);
	/* Was leaked: drop our reference before returning. */
	perf_cpu_map__put(map);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) int test__cpu_map_print(struct test *test __maybe_unused, int subtest __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,5"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3,5,7,9,11,13,15,17,19,21-40"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) TEST_ASSERT_VAL("failed to convert map", cpu_map_print("2-5"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3-6,8-10,24,35-37"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3-6,8-10,24,35-37"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1-10,12-20,22-30,32-40"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
/*
 * Merge two overlapping cpu maps and verify the result is the sorted
 * union: {4,2,1} U {4,5,7} -> "1-2,4-5,7" with five entries.
 */
int test__cpu_map_merge(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	struct perf_cpu_map *a = perf_cpu_map__new("4,2,1");
	struct perf_cpu_map *b = perf_cpu_map__new("4,5,7");
	struct perf_cpu_map *c = perf_cpu_map__merge(a, b);
	char buf[100];

	TEST_ASSERT_VAL("failed to merge map: bad nr", c->nr == 5);
	cpu_map__snprint(c, buf, sizeof(buf));
	TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(buf, "1-2,4-5,7"));
	/*
	 * NOTE(review): only 'b' and 'c' are released here — presumably
	 * perf_cpu_map__merge() consumes the reference to 'a'; confirm
	 * against the libperf implementation before "fixing" this.
	 */
	perf_cpu_map__put(b);
	perf_cpu_map__put(c);
	return 0;
}