// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

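/* Hooks in this list may be left unset by a bpf_tcp_ca program:
 * bpf_tcp_ca_init_member() only insists on a prog for members that are
 * neither optional nor unsupported.  In practice that means the
 * remaining tcp_congestion_ops function pointers (ssthresh, cong_avoid
 * and undo_cwnd) must be wired up for the struct_ops map to load.
 */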
static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};

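/* get_info() dumps internal CA state for userspace diagnostics and has
 * no sensible BPF counterpart, so bpf_tcp_ca_check_member() rejects any
 * attempt to implement it with -ENOTSUPP.
 */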
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

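/* Resolve and cache the BTF ids of "sock" and "tcp_sock" once, when the
 * struct_ops is registered.  They are reused below to promote ctx
 * arguments and to type-check helper arguments.
 */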
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

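/* Linear scans are fine here: both arrays are tiny and these checks run
 * only at struct_ops load/verification time, never in the data path.
 */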
static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

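/* Context accesses follow the usual BTF rules (read-only, aligned,
 * within the u64 argument array).  Arguments typed as "struct sock *"
 * are promoted to carry the tcp_sock BTF id, so a CA prog can
 * dereference tcp_sock fields directly, e.g. (illustrative BPF-side
 * cast, not part of this file):
 *
 *	struct tcp_sock *tp = (struct tcp_sock *)sk;
 *	__u32 cwnd = tp->snd_cwnd;
 */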
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	if (!btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

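/* Reads are delegated to the generic btf_struct_access().  Writes are
 * allowed only to a short allowlist of tcp_sock/inet_connection_sock
 * members, so a CA prog may do e.g. "tp->snd_cwnd = cwnd" while any
 * other store is rejected with -EACCES.
 */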
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, t, off, size, atype, next_btf_id);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return NOT_INIT;
}

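/* Kernel side of the bpf_tcp_send_ack() helper.  The verifier has
 * already proved tp is a non-NULL tcp_sock pointer, hence no NULL
 * check.  A CA prog would call it roughly like this (a sketch, assuming
 * the uapi helper wrapper):
 *
 *	bpf_tcp_send_ack(tp, tp->rcv_nxt);
 */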
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

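/* Besides the base helpers, bpf_tcp_ca progs get bpf_tcp_send_ack() and
 * the socket-local storage helpers.
 */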
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

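/* Called once per struct_ops member when userspace updates the map.
 * Return convention (the struct_ops contract as relied on here): a
 * negative errno rejects the update, a positive return means the member
 * was fully handled in this callback, and 0 lets the caller apply its
 * default handling (e.g. copying func ptrs).
 */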
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}

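/* Reject progs wired to members we cannot support (see unsupported_ops). */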
static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

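/* (Un)register the populated tcp_congestion_ops with the TCP stack when
 * the struct_ops map is attached/detached.  Once registered, the CA is
 * selectable like any kernel CA, e.g. via the TCP_CONGESTION sockopt.
 */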
static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

/* Avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops	= &bpf_tcp_ca_verifier_ops,
	.reg		= bpf_tcp_ca_reg,
	.unreg		= bpf_tcp_ca_unreg,
	.check_member	= bpf_tcp_ca_check_member,
	.init_member	= bpf_tcp_ca_init_member,
	.init		= bpf_tcp_ca_init,
	.name		= "tcp_congestion_ops",
};
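
/* For reference, a minimal sketch of the BPF-object side this backs.
 * Illustrative only (it belongs in a BPF object, not in this file); it
 * assumes libbpf's SEC(".struct_ops") convention, and the names and the
 * elided hook bodies are hypothetical:
 *
 *	SEC("struct_ops/sample_ssthresh")
 *	__u32 BPF_PROG(sample_ssthresh, struct sock *sk)
 *	{
 *		return ((struct tcp_sock *)sk)->snd_ssthresh;
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops sample_ca = {
 *		.ssthresh	= (void *)sample_ssthresh,
 *		.cong_avoid	= (void *)sample_cong_avoid,
 *		.undo_cwnd	= (void *)sample_undo_cwnd,
 *		.name		= "bpf_sample",
 *	};
 */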