Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

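The listing below appears to be tools/perf/util/mmap.c — the perf tool's event ring-buffer mmap helpers — as carried in this tree.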
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>

#define MASK_SIZE 1023
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
	char buf[MASK_SIZE + 1];
	size_t len;

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	buf[len] = '\0';
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
}

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

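/*
 * The auxtrace hooks below are __weak no-op defaults: they keep this file
 * linkable when AUX area tracing is not built in, and are overridden by the
 * strong definitions (in util/auxtrace.c in the upstream tree) when it is.
 */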
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long *node_mask;
	unsigned long node_index;
	int err = 0;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_index = cpu__get_node(cpu);
		node_mask = bitmap_alloc(node_index + 1);
		if (!node_mask) {
			pr_err("Failed to allocate node mask for mbind: error %m\n");
			return -1;
		}
		set_bit(node_index, node_mask);
		if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
				data, data + mmap_len, node_index);
			err = -1;
		}
		bitmap_free(node_mask);
	}

	return err;
}
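/*
 * Worked example (sketch): for a CPU on NUMA node 2, node_index == 2, so a
 * 3-bit mask is allocated and bit 2 is set.  mbind()'s maxnode argument is
 * passed as node_index + 1 + 1 == 4 because the kernel decrements maxnode
 * internally before copying the mask; passing just node_index + 1 would
 * leave the highest set bit outside the range the kernel looks at.
 */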
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
		int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use a cblock.aio_fildes value different from -1
			 * to denote a started aio write operation on the
			 * cblock, so an explicit record__aio_sync() call
			 * is required before the cblock can be reused.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with a priority delta to get
			 * faster aio write system calls: queued requests
			 * are kept in separate per-priority queues, so
			 * adding a new request iterates through a shorter
			 * per-priority list. Blocks with numbers higher
			 * than _SC_AIO_PRIO_DELTA_MAX get priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}
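/*
 * Worked example (illustrative numbers): if sysconf(_SC_AIO_PRIO_DELTA_MAX)
 * returns 20 and nr_cblocks is 4, the cblocks get aio_reqprio 20, 19, 18, 17.
 * With nr_cblocks == 25, the cblocks at indexes 21..24 would compute a
 * negative delta and all clamp to priority 0.
 */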

static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif

void mmap__munmap(struct mmap *map)
{
	bitmap_free(map->affinity_mask.bits);

	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
	int c, cpu, nr_cpus;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			set_bit(cpu, mask->bits);
	}
}

static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	map->affinity_mask.nbits = cpu__max_cpu();
	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		set_bit(map->core.cpu, map->affinity_mask.bits);

	return 0;
}
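/*
 * Example (sketch): with perf record --affinity=node on a two-node machine,
 * a mmap whose map->core.cpu is CPU 5 on node 1 gets the bits of every
 * online node-1 CPU set in affinity_mask, so the reading thread can migrate
 * itself next to the ring buffer's memory; --affinity=cpu would set only
 * bit 5.
 */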

int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
		perf_mmap__setup_affinity_mask(map, mp)) {
		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
			  errno);
		return -1;
	}

	if (verbose == 2)
		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;

	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
					errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}
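/*
 * Wrap-around example: with a 4096-byte data area (mask == 4095),
 * start == 4000 and end == 4300, the pending 300 bytes straddle the end of
 * the ring: (start & mask) + 300 == 4300 while (end & mask) == 204, so the
 * data is pushed in two chunks — 96 bytes at offset 4000, then 204 bytes
 * from offset 0.
 *
 * Minimal sketch of a push() callback (the record_file type and write_all()
 * helper are hypothetical, shown only to illustrate the contract: consume
 * the bytes and return a negative value on failure):
 *
 *	static int push_to_file(struct mmap *map, void *to, void *buf, size_t size)
 *	{
 *		struct record_file *file = to;
 *
 *		if (write_all(file->fd, buf, size) < 0)	// assumed helper
 *			return -1;
 *		file->bytes_written += size;
 *		return 0;
 *	}
 *
 *	// drained once per mmap, e.g. from the tool's main loop:
 *	// perf_mmap__push(map, &file, push_to_file);
 */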