// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include "rtrs-clt.h"

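/**
 * rtrs_clt_update_wc_stats() - account a work completion in per-CPU stats
 * @con: connection the completion arrived on
 *
 * If the completion is processed on a different CPU than the one the
 * connection is bound to, count it as a migration: "to" on the current
 * CPU and "from" (atomically, since it is updated remotely) on the
 * connection's home CPU.
 */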
void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt_stats *stats = sess->stats;
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	cpu = raw_smp_processor_id();
	s = this_cpu_ptr(stats->pcpu_stats);
	if (unlikely(con->cpu != cpu)) {
		s->cpu_migr.to++;

		/* Careful here, override s pointer */
		s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
		atomic_inc(&s->cpu_migr.from);
	}
}

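/**
 * rtrs_clt_inc_failover_cnt() - count one failed-over RDMA request
 * @stats: client statistics to update
 */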
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
	struct rtrs_clt_stats_pcpu *s;

	s = this_cpu_ptr(stats->pcpu_stats);
	s->rdma.failover_cnt++;
}

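/**
 * rtrs_clt_stats_migration_cnt_to_str() - format CPU migration counters
 * @stats: client statistics to dump
 * @buf: output buffer
 * @len: size of @buf
 *
 * Prints a header row of CPU ids followed by "from:" and "to :" rows with
 * the per-CPU migration counters.
 *
 * Return: number of characters written to @buf.
 */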
int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats,
					char *buf, size_t len)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	used = scnprintf(buf, len, " ");
	for_each_possible_cpu(cpu)
		used += scnprintf(buf + used, len - used, " CPU%u", cpu);

	used += scnprintf(buf + used, len - used, "\nfrom:");
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += scnprintf(buf + used, len - used, " %d",
				  atomic_read(&s->cpu_migr.from));
	}

	used += scnprintf(buf + used, len - used, "\nto :");
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += scnprintf(buf + used, len - used, " %d",
				  s->cpu_migr.to);
	}
	used += scnprintf(buf + used, len - used, "\n");

	return used;
}

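/**
 * rtrs_clt_stats_reconnects_to_str() - format reconnect counters
 * @stats: client statistics to dump
 * @buf: output buffer
 * @len: size of @buf
 *
 * Return: number of characters written; the output is
 * "<successful_cnt> <fail_cnt>\n".
 */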
int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
				     size_t len)
{
	return scnprintf(buf, len, "%d %d\n",
			 stats->reconnects.successful_cnt,
			 stats->reconnects.fail_cnt);
}

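/**
 * rtrs_clt_stats_rdma_to_str() - format aggregated RDMA counters
 * @stats: client statistics to dump
 * @page: output buffer
 * @len: size of @page
 *
 * Sums the per-CPU read/write counts and total sizes and prints them
 * together with the current inflight count and the failover count.
 *
 * Return: number of characters written to @page.
 */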
ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
				   char *page, size_t len)
{
	struct rtrs_clt_stats_rdma sum;
	struct rtrs_clt_stats_rdma *r;
	int cpu;

	memset(&sum, 0, sizeof(sum));

	for_each_possible_cpu(cpu) {
		r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;

		sum.dir[READ].cnt += r->dir[READ].cnt;
		sum.dir[READ].size_total += r->dir[READ].size_total;
		sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
		sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
		sum.failover_cnt += r->failover_cnt;
	}

	return scnprintf(page, len, "%llu %llu %llu %llu %u %llu\n",
			 sum.dir[READ].cnt, sum.dir[READ].size_total,
			 sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
			 atomic_read(&stats->inflight), sum.failover_cnt);
}

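/**
 * rtrs_clt_reset_all_help() - print usage hint for the reset-all attribute
 * @s: client statistics (unused)
 * @page: output buffer
 * @len: size of @page
 */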
ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s,
				char *page, size_t len)
{
	return scnprintf(page, len, "echo 1 to reset all statistics\n");
}

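/**
 * rtrs_clt_reset_rdma_stats() - zero the per-CPU RDMA counters
 * @stats: client statistics to reset
 * @enable: must be true, writing 0 is rejected
 *
 * Return: 0 on success, -EINVAL if @enable is false.
 */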
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->rdma, 0, sizeof(s->rdma));
	}

	return 0;
}

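/**
 * rtrs_clt_reset_cpu_migr_stats() - zero the per-CPU migration counters
 * @stats: client statistics to reset
 * @enable: must be true, writing 0 is rejected
 *
 * Return: 0 on success, -EINVAL if @enable is false.
 */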
int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
	}

	return 0;
}

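/**
 * rtrs_clt_reset_reconnects_stat() - zero the reconnect counters
 * @stats: client statistics to reset
 * @enable: must be true, writing 0 is rejected
 *
 * Return: 0 on success, -EINVAL if @enable is false.
 */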
int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
{
	if (!enable)
		return -EINVAL;

	memset(&stats->reconnects, 0, sizeof(stats->reconnects));

	return 0;
}

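/**
 * rtrs_clt_reset_all_stats() - reset every statistic exposed by the client
 * @s: client statistics to reset
 * @enable: must be true, writing 0 is rejected
 *
 * Resets the RDMA, CPU migration and reconnect counters and clears the
 * inflight counter.
 *
 * Return: 0 on success, -EINVAL if @enable is false.
 */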
int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
{
	if (enable) {
		rtrs_clt_reset_rdma_stats(s, enable);
		rtrs_clt_reset_cpu_migr_stats(s, enable);
		rtrs_clt_reset_reconnects_stat(s, enable);
		atomic_set(&s->inflight, 0);
		return 0;
	}

	return -EINVAL;
}

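/*
 * Account one RDMA transfer of @size bytes in direction @d (READ or WRITE)
 * on the current CPU.
 */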
static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
					      size_t size, int d)
{
	struct rtrs_clt_stats_pcpu *s;

	s = this_cpu_ptr(stats->pcpu_stats);
	s->rdma.dir[d].cnt++;
	s->rdma.dir[d].size_total += size;
}

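/**
 * rtrs_clt_update_all_stats() - update statistics for an RDMA request
 * @req: the client IO request
 * @dir: transfer direction, READ or WRITE
 *
 * Adds the request's user and data lengths to the per-CPU RDMA counters
 * and, when the min-inflight multipath policy is in use, increments the
 * session's inflight counter.
 */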
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt_stats *stats = sess->stats;
	unsigned int len;

	len = req->usr_len + req->data_len;
	rtrs_clt_update_rdma_stats(stats, len, dir);
	if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_inc(&stats->inflight);
}

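/**
 * rtrs_clt_init_stats() - allocate and initialize per-CPU client statistics
 * @stats: statistics object to initialize
 *
 * Return: 0 on success, -ENOMEM if the per-CPU counters cannot be allocated.
 */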
int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
{
	stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
	if (!stats->pcpu_stats)
		return -ENOMEM;

	/*
	 * successful_cnt will be set to 0 after the session
	 * is established for the first time.
	 */
	stats->reconnects.successful_cnt = -1;

	return 0;
}