// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm-core.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
	unsigned long long *histogram;
};

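/*
 * Per-area shared state: in-flight counters, the timestamp of the last
 * dm_stat_round() and a scratch area that the per-CPU counters are
 * summed into when the statistics are read or cleared.
 */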
struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long long stamp;
	struct dm_stat_percpu tmp;
};

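/*
 * One statistics region: it covers the sector range [start, end), split
 * into n_entries areas of 'step' sectors each (the last area may be
 * shorter).
 */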
struct dm_stat {
	struct list_head list_entry;
	int id;
	unsigned stat_flags;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	unsigned n_histogram_entries;
	unsigned long long *histogram_boundaries;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[];
};

#define STAT_PRECISE_TIMESTAMPS 1

struct dm_stats_last_position {
	sector_t last_sector;
	unsigned last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR 4
#define DM_STATS_VMALLOC_FACTOR 2

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;

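/*
 * Check whether alloc_size more bytes can be accounted without exceeding
 * the limits above. The caller must hold shared_memory_lock.
 */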
static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
		return false;
#ifdef CONFIG_MMU
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;
#endif
	return true;
}

static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);

	ret = __check_shared_memory(alloc_size);

	spin_unlock_irq(&shared_memory_lock);

	return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);

	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}

	shared_memory_amount += alloc_size;

	spin_unlock_irq(&shared_memory_lock);

	return true;
}

static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);

	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}

	shared_memory_amount -= alloc_size;

	spin_unlock_irqrestore(&shared_memory_lock, flags);
}

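/*
 * Allocate zeroed memory accounted against the limits above; returns
 * NULL if either the accounting or the allocation itself fails.
 */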
static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);

	return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);

	kvfree(ptr);
}

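/*
 * Free a dm_stat including all its per-CPU and histogram buffers.
 * This runs either directly or as an RCU callback; dm_stats_delete()
 * only defers it with call_rcu() when none of the buffers were
 * vmalloc'ed, because vfree must not be called from an RCU callback.
 */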
static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->histogram_boundaries);
	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu) {
		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	}
	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}

void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
	mutex_destroy(&stats->mutex);
}

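/*
 * Allocate and register a new statistics region covering [start, end).
 * Returns the new region id (the smallest free one) on success or a
 * negative errno on failure. The device is quiesced around the list
 * insertion via the suspend/resume callbacks; see the comment below.
 */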
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, unsigned stat_flags,
			   unsigned n_histogram_entries,
			   unsigned long long *histogram_boundaries,
			   const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = struct_size(s, stat_shared, n_entries);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
		return -EOVERFLOW;

	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->stat_flags = stat_flags;
	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;
	s->histogram_alloc_size = histogram_alloc_size;

	s->n_histogram_entries = n_histogram_entries;
	s->histogram_boundaries = kmemdup(histogram_boundaries,
					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!s->histogram_boundaries) {
		r = -ENOMEM;
		goto out;
	}

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
	}

	if (s->n_histogram_entries) {
		unsigned long long *hi;
		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
		if (!hi) {
			r = -ENOMEM;
			goto out;
		}
		for (ni = 0; ni < n_entries; ni++) {
			s->stat_shared[ni].tmp.histogram = hi;
			hi += s->n_histogram_entries + 1;
		}
	}

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
		if (s->n_histogram_entries) {
			unsigned long long *hi;
			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
			if (!hi) {
				r = -ENOMEM;
				goto out;
			}
			for (ni = 0; ni < n_entries; ni++) {
				p[ni].histogram = hi;
				hi += s->n_histogram_entries + 1;
			}
		}
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);
	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}

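/*
 * Find a region by id. The list is kept sorted by id and the caller
 * must hold stats->mutex.
 */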
static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	list_for_each_entry(s, &stats->list, list_entry) {
		if (s->id > id)
			break;
		if (s->id == id)
			return s;
	}

	return NULL;
}

static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);
	mutex_unlock(&stats->mutex);

	/*
	 * vfree can't be called from RCU callback
	 */
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu[cpu]) ||
		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
			goto do_sync_free;
	if (is_vmalloc_addr(s) ||
	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
		call_rcu(&s->rcu_head, dm_stat_free);
	}
	return 0;
}

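/*
 * List all regions, optionally restricted to a matching program id.
 */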
static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned sz = 0;

	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 */
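	/* e.g. "0: 0+2048 512 dmstats -" (values illustrative) */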

	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
			       (unsigned long long)s->start,
			       (unsigned long long)len,
			       (unsigned long long)s->step,
			       s->program_id,
			       s->aux_data);
			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
				DMEMIT(" precise_timestamps");
			if (s->n_histogram_entries) {
				unsigned i;
				DMEMIT(" histogram:");
				for (i = 0; i < s->n_histogram_entries; i++) {
					if (i)
						DMEMIT(",");
					DMEMIT("%llu", s->histogram_boundaries[i]);
				}
			}
			DMEMIT("\n");
		}
	}
	mutex_unlock(&stats->mutex);

	return 1;
}

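/*
 * Fold the time elapsed since the last call into the time-based
 * counters, weighted by the number of I/Os currently in flight.
 */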
static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
			  struct dm_stat_percpu *p)
{
	/*
	 * This is racy, but so is part_round_stats_single.
	 */
	unsigned long long now, difference;
	unsigned in_flight_read, in_flight_write;

	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
		now = jiffies;
	else
		now = ktime_to_ns(ktime_get());

	difference = now - shared->stamp;
	if (!difference)
		return;

	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}
	shared->stamp = now;
}

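/*
 * Account one fragment of a bio to a single area: at the start of an
 * I/O (end == false) only the in-flight counter changes, at completion
 * (end == true) the remaining counters are updated.
 */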
static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      int idx, sector_t len,
			      struct dm_stats_aux *stats_aux, bool end,
			      unsigned long duration_jiffies)
{
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable.
	 *
	 * preempt_disable/enable is racy if the driver finishes bios
	 * from non-interrupt context as well as from interrupt context
	 * or from several different interrupts.
	 *
	 * On 64-bit architectures the race only results in not counting some
	 * events, so it is acceptable. On 32-bit architectures the race could
	 * cause the counter going off by 2^32, so we need to do proper locking
	 * there.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
#if BITS_PER_LONG == 32
	unsigned long flags;
	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(s, shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		unsigned long long duration;
		dm_stat_round(s, shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += stats_aux->merged;
		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
			p->ticks[idx] += duration_jiffies;
			duration = jiffies_to_msecs(duration_jiffies);
		} else {
			p->ticks[idx] += stats_aux->duration_ns;
			duration = stats_aux->duration_ns;
		}
		if (s->n_histogram_entries) {
			unsigned lo = 0, hi = s->n_histogram_entries + 1;
			while (lo + 1 < hi) {
				unsigned mid = (lo + hi) / 2;
				if (s->histogram_boundaries[mid - 1] > duration) {
					hi = mid;
				} else {
					lo = mid;
				}
			}
			p->histogram[lo]++;
		}
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}

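/*
 * Clip the bio to this region and account it area by area; a bio that
 * spans several areas is split into per-area fragments.
 */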
static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration_jiffies,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;
	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux, end, duration_jiffies);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}

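/*
 * Main accounting entry point, called when a bio is submitted
 * (end == false) and again when it completes (end == true). The bio is
 * accounted to every region that it overlaps; the per-CPU last position
 * is used to flag back-to-back requests as merged.
 */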
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned bi_sectors, bool end,
			 unsigned long duration_jiffies,
			 struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;
	bool got_precise_time;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
		/*
		 * A race condition can at worst result in the merged flag being
		 * misrepresented, so we don't have to disable preemption here.
		 */
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == READ_ONCE(last->last_sector) &&
			 ((bi_rw == WRITE) ==
			  (READ_ONCE(last->last_rw) == WRITE)));
		WRITE_ONCE(last->last_sector, end_sector);
		WRITE_ONCE(last->last_rw, bi_rw);
	}

	rcu_read_lock();

	got_precise_time = false;
	list_for_each_entry_rcu(s, &stats->list, list_entry) {
		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
			if (!end)
				stats_aux->duration_ns = ktime_to_ns(ktime_get());
			else
				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
			got_precise_time = true;
		}
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
	}

	rcu_read_unlock();
}

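/*
 * Sum the per-CPU counters for area 'x' into shared->tmp. The counters
 * are read with READ_ONCE because other CPUs may be updating them
 * concurrently (see the comment in dm_stat_for_entry).
 */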
static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(s, shared, p);
	local_irq_enable();

	shared->tmp.sectors[READ] = 0;
	shared->tmp.sectors[WRITE] = 0;
	shared->tmp.ios[READ] = 0;
	shared->tmp.ios[WRITE] = 0;
	shared->tmp.merges[READ] = 0;
	shared->tmp.merges[WRITE] = 0;
	shared->tmp.ticks[READ] = 0;
	shared->tmp.ticks[WRITE] = 0;
	shared->tmp.io_ticks[READ] = 0;
	shared->tmp.io_ticks[WRITE] = 0;
	shared->tmp.io_ticks_total = 0;
	shared->tmp.time_in_queue = 0;

	if (s->n_histogram_entries)
		memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));

	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
		}
	}
}

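/*
 * Clear the counters for areas [idx_start, idx_end) by subtracting the
 * totals accumulated in ->tmp from this CPU's counters, so that updates
 * racing on other CPUs are not lost.
 */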
static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);
		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				local_irq_disable();
				p = &s->stat_percpu[smp_processor_id()][x];
				p->histogram[i] -= shared->tmp.histogram[i];
				local_irq_enable();
			}
		}
	}
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}

/*
 * This is like jiffies_to_msecs, but works for 64-bit values: the value is
 * converted in 22-bit chunks, which keeps each jiffies_to_msecs argument
 * within 32-bit range.
 */
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
	unsigned long long result;
	unsigned mult;

	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
		return j;

	result = 0;
	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}

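/*
 * Print, and optionally clear, the counters for areas
 * [idx_start, idx_start + idx_len) of the given region.
 */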
static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */
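	/* e.g. "0+512 5 0 40 12 3 0 24 15 0 27 27 12 15" (values illustrative) */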
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) mutex_lock(&stats->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) s = __dm_stats_find(stats, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (!s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) mutex_unlock(&stats->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) idx_end = idx_start + idx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (idx_end < idx_start ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) idx_end > s->n_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) idx_end = s->n_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (idx_start > idx_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) idx_start = idx_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) step = s->step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) start = s->start + (step * idx_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) for (x = idx_start; x < idx_end; x++, start = end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) shared = &s->stat_shared[x];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) end = start + step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (unlikely(end > s->end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) end = s->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
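		/* Sum the per-CPU counters for entry x into shared->tmp. */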
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) __dm_stat_init_temporary_percpu_totals(shared, s, x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) (unsigned long long)start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) (unsigned long long)step,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) shared->tmp.ios[READ],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) shared->tmp.merges[READ],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) shared->tmp.sectors[READ],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) shared->tmp.ios[WRITE],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) shared->tmp.merges[WRITE],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) shared->tmp.sectors[WRITE],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) dm_stat_in_flight(shared),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (s->n_histogram_entries) {
			unsigned i;

			for (i = 0; i < s->n_histogram_entries + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) DMEMIT("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
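		/*
		 * The result buffer is full; stop early so that userspace
		 * can detect the truncation and retry with a larger buffer.
		 */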
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (unlikely(sz + 1 >= maxlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) goto buffer_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) __dm_stat_clear(s, idx_start, idx_end, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) buffer_overflow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) mutex_unlock(&stats->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
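/*
 * Replace the auxiliary data string of region @id with a kstrdup()ed
 * copy of @aux_data, freeing the old string under stats->mutex.
 */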
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct dm_stat *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) const char *new_aux_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) mutex_lock(&stats->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) s = __dm_stats_find(stats, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (!s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) mutex_unlock(&stats->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) new_aux_data = kstrdup(aux_data, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (!new_aux_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) mutex_unlock(&stats->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) kfree(s->aux_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) s->aux_data = new_aux_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) mutex_unlock(&stats->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
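/*
 * Parse a comma-separated list of strictly increasing boundary values
 * (e.g. "10,20,30") into a newly allocated array. On success the caller
 * owns *histogram_boundaries; on a parse error the array may already be
 * allocated, so the caller must free it on all paths
 * (message_stats_create() does so at its "ret:" label).
 */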
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) static int parse_histogram(const char *h, unsigned *n_histogram_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) unsigned long long **histogram_boundaries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) const char *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) unsigned n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) unsigned long long last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) *n_histogram_entries = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) for (q = h; *q; q++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (*q == ',')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) (*n_histogram_entries)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) *histogram_boundaries = kmalloc_array(*n_histogram_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) sizeof(unsigned long long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (!*histogram_boundaries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) unsigned long long hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int s;
		char ch;

		s = sscanf(h, "%llu%c", &hi, &ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (!s || (s == 2 && ch != ','))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (hi <= last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) last = hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) (*histogram_boundaries)[n] = hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (s == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) h = strchr(h, ',') + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static int message_stats_create(struct mapped_device *md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) unsigned argc, char **argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) unsigned long long start, end, len, step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) unsigned divisor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) const char *program_id, *aux_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) unsigned stat_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) unsigned n_histogram_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) unsigned long long *histogram_boundaries = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct dm_arg_set as, as_backup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) const char *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) unsigned feature_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * Input format:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
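	 *
	 * For example, the arguments "- /100" subdivide the whole device
	 * into 100 equally sized regions; see
	 * Documentation/admin-guide/device-mapper/statistics.rst.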
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (argc < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) goto ret_einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) as.argc = argc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) as.argv = argv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) dm_consume_args(&as, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) a = dm_shift_arg(&as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (!strcmp(a, "-")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) len = dm_get_size(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) } else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) start != (sector_t)start || len != (sector_t)len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) goto ret_einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) end = start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (start >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) goto ret_einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) a = dm_shift_arg(&as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (!divisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) step = end - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (do_div(step, divisor))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) step++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (!step)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) step = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) } else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) step != (sector_t)step || !step)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) goto ret_einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
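	/*
	 * The feature-argument count is optional. If the next argument
	 * does not parse as a number, rewind to as_backup and treat it
	 * as <program_id>.
	 */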
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) as_backup = as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) a = dm_shift_arg(&as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) while (feature_args--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) a = dm_shift_arg(&as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (!a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) goto ret_einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (!strcasecmp(a, "precise_timestamps"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) stat_flags |= STAT_PRECISE_TIMESTAMPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) else if (!strncasecmp(a, "histogram:", 10)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (n_histogram_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) goto ret_einval;
				r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries);
				if (r)
					goto ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) goto ret_einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) as = as_backup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) program_id = "-";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) aux_data = "-";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) a = dm_shift_arg(&as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) program_id = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) a = dm_shift_arg(&as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) aux_data = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (as.argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) goto ret_einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (userspace would retry with a larger buffer,
	 * but the region id that caused the overflow is already leaked).
	 * So we must detect buffer overflow in advance.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) snprintf(result, maxlen, "%d", INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (dm_message_test_buffer_overflow(result, maxlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) r = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) goto ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) n_histogram_entries, histogram_boundaries, program_id, aux_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) dm_internal_suspend_fast, dm_internal_resume_fast, md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) r = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) goto ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) snprintf(result, maxlen, "%d", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) r = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) goto ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) ret_einval:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ret:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) kfree(histogram_boundaries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
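/*
 * Message: @stats_delete <region_id>
 */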
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static int message_stats_delete(struct mapped_device *md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) unsigned argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (argc != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return dm_stats_delete(dm_get_stats(md), id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
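/*
 * Message: @stats_clear <region_id>
 */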
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static int message_stats_clear(struct mapped_device *md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) unsigned argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (argc != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return dm_stats_clear(dm_get_stats(md), id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
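/*
 * Message: @stats_list [<program_id>]
 *
 * If <program_id> is given, dm_stats_list() restricts the listing to
 * regions whose program_id matches it.
 */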
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static int message_stats_list(struct mapped_device *md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) unsigned argc, char **argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) const char *program = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (argc < 1 || argc > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (argc > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) program = kstrdup(argv[1], GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (!program)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) r = dm_stats_list(dm_get_stats(md), program, result, maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) kfree(program);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
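/*
 * Messages: @stats_print <region_id> [<starting_line> <number_of_lines>]
 *	     @stats_print_clear <region_id> [<starting_line> <number_of_lines>]
 *
 * "-" for <starting_line> or <number_of_lines> selects the default
 * (start at line 0, print to the end).
 */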
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static int message_stats_print(struct mapped_device *md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) unsigned argc, char **argv, bool clear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) unsigned long idx_start = 0, idx_len = ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (argc != 2 && argc != 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (argc > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (strcmp(argv[2], "-") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (strcmp(argv[3], "-") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) result, maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
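/*
 * Message: @stats_set_aux <region_id> <aux_data>
 */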
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static int message_stats_set_aux(struct mapped_device *md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) unsigned argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (argc != 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
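/*
 * Entry point for "dmsetup message" statistics commands, e.g.
 * (examples follow Documentation/admin-guide/device-mapper/statistics.rst):
 *
 *	dmsetup message statsdev 0 @stats_create - /100
 *	dmsetup message statsdev 0 @stats_print 0
 *
 * Returns 2 if argv[0] is not a statistics message.
 */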
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /* All messages here must start with '@' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!strcasecmp(argv[0], "@stats_create"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) r = message_stats_create(md, argc, argv, result, maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) else if (!strcasecmp(argv[0], "@stats_delete"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) r = message_stats_delete(md, argc, argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) else if (!strcasecmp(argv[0], "@stats_clear"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) r = message_stats_clear(md, argc, argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) else if (!strcasecmp(argv[0], "@stats_list"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) r = message_stats_list(md, argc, argv, result, maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) else if (!strcasecmp(argv[0], "@stats_print"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) r = message_stats_print(md, argc, argv, false, result, maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) else if (!strcasecmp(argv[0], "@stats_print_clear"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) r = message_stats_print(md, argc, argv, true, result, maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) else if (!strcasecmp(argv[0], "@stats_set_aux"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) r = message_stats_set_aux(md, argc, argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) return 2; /* this wasn't a stats message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (r == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) DMWARN("Invalid parameters for message %s", argv[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) int __init dm_statistics_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) shared_memory_amount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) dm_stat_need_rcu_barrier = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
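/*
 * Wait for any pending call_rcu() frees before the module goes away,
 * and warn if statistics memory was leaked.
 */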
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) void dm_statistics_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (dm_stat_need_rcu_barrier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) rcu_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (WARN_ON(shared_memory_amount))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");