// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inet_diag.c	Module for monitoring INET transport protocol sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/bpf_sk_storage.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

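/* Table of per-protocol diag handlers, indexed by IPPROTO_* value.  Slots
 * are filled when a protocol's diag module (tcp_diag, udp_diag, ...)
 * registers itself; lookups happen under inet_diag_table_mutex below.
 */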
static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	const __be32 *saddr;
	const __be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
	u32 ifindex;
	u32 mark;
#ifdef CONFIG_SOCK_CGROUP_DATA
	u64 cgroup_id;
#endif
};

static DEFINE_MUTEX(inet_diag_table_mutex);

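/* Find the diag handler for @proto, requesting the corresponding diag module
 * if it has not been loaded yet.  On return the table mutex is held (even on
 * the error paths), so callers can unconditionally pair this with
 * inet_diag_unlock_handler().
 */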
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	if (proto < 0 || proto >= IPPROTO_MAX) {
		mutex_lock(&inet_diag_table_mutex);
		return ERR_PTR(-ENOENT);
	}

	if (!inet_diag_table[proto])
		sock_load_diag_module(AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}

static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}

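/* Fill the fields common to every reply type (full, timewait and request
 * sockets): address family, ports, bound device, socket cookie and both
 * endpoint addresses.
 */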
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
{
	r->idiag_family = sk->sk_family;

	r->id.idiag_sport = htons(sk->sk_num);
	r->id.idiag_dport = sk->sk_dport;
	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
	} else
#endif
	{
	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = sk->sk_rcv_saddr;
	r->id.idiag_dst[0] = sk->sk_daddr;
	}
}
EXPORT_SYMBOL_GPL(inet_diag_msg_common_fill);

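/* Estimate an upper bound on the attribute space a single reply can need,
 * used to size the reply skb in inet_diag_dump_one_icsk().  The final
 * "+ 64" is slack for small attributes that are not accounted for
 * individually.
 */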
static size_t inet_sk_attr_size(struct sock *sk,
				const struct inet_diag_req_v2 *req,
				bool net_admin)
{
	const struct inet_diag_handler *handler;
	size_t aux = 0;

	handler = inet_diag_table[req->sdiag_protocol];
	if (handler && handler->idiag_get_aux_size)
		aux = handler->idiag_get_aux_size(sk, net_admin);

	return nla_total_size(sizeof(struct tcp_info))
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ inet_diag_msg_attrs_size()
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
		+ nla_total_size(TCP_CA_NAME_MAX)
		+ nla_total_size(sizeof(struct tcpvegas_info))
		+ aux
		+ 64;
}

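/* Emit the generic per-socket attributes (shutdown state, TOS/TCLASS, mark,
 * class id, cgroup id and the INET_DIAG_SOCKOPT bitfield) and fill in the
 * uid/inode fields of @r.  Returns 0 on success, 1 if any attribute could
 * not be added.
 */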
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
			     struct inet_diag_msg *r, int ext,
			     struct user_namespace *user_ns,
			     bool net_admin)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_sockopt inet_sockopt;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS,
				       inet6_sk(sk)->tclass) < 0)
				goto errout;

		if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
		    nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
			goto errout;
	}
#endif

	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
		goto errout;

	if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
	    ext & (1 << (INET_DIAG_TCLASS - 1))) {
		u32 classid = 0;

#ifdef CONFIG_SOCK_CGROUP_DATA
		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
#endif
		/* Fallback to socket priority if class id isn't set.
		 * Classful qdiscs use it as direct reference to class.
		 * For cgroup2 classid is always zero.
		 */
		if (!classid)
			classid = sk->sk_priority;

		if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
			goto errout;
	}

#ifdef CONFIG_SOCK_CGROUP_DATA
	if (nla_put_u64_64bit(skb, INET_DIAG_CGROUP_ID,
			      cgroup_id(sock_cgroup_ptr(&sk->sk_cgrp_data)),
			      INET_DIAG_PAD))
		goto errout;
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	memset(&inet_sockopt, 0, sizeof(inet_sockopt));
	inet_sockopt.recverr = inet->recverr;
	inet_sockopt.is_icsk = inet->is_icsk;
	inet_sockopt.freebind = inet->freebind;
	inet_sockopt.hdrincl = inet->hdrincl;
	inet_sockopt.mc_loop = inet->mc_loop;
	inet_sockopt.transparent = inet->transparent;
	inet_sockopt.mc_all = inet->mc_all;
	inet_sockopt.nodefrag = inet->nodefrag;
	inet_sockopt.bind_address_no_port = inet->bind_address_no_port;
	inet_sockopt.recverr_rfc4884 = inet->recverr_rfc4884;
	inet_sockopt.defer_connect = inet->defer_connect;
	if (nla_put(skb, INET_DIAG_SOCKOPT, sizeof(inet_sockopt),
		    &inet_sockopt))
		goto errout;

	return 0;
errout:
	return 1;
}
EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);

static int inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen,
				 struct nlattr **req_nlas)
{
	struct nlattr *nla;
	int remaining;

	nlmsg_for_each_attr(nla, nlh, hdrlen, remaining) {
		int type = nla_type(nla);

		if (type == INET_DIAG_REQ_PROTOCOL && nla_len(nla) != sizeof(u32))
			return -EINVAL;

		if (type < __INET_DIAG_REQ_MAX)
			req_nlas[type] = nla;
	}
	return 0;
}

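/* The effective protocol of a request: an INET_DIAG_REQ_PROTOCOL attribute,
 * if present, overrides the sdiag_protocol field of the request header.
 */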
static int inet_diag_get_protocol(const struct inet_diag_req_v2 *req,
				  const struct inet_diag_dump_data *data)
{
	if (data->req_nlas[INET_DIAG_REQ_PROTOCOL])
		return nla_get_u32(data->req_nlas[INET_DIAG_REQ_PROTOCOL]);
	return req->sdiag_protocol;
}

#define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, struct netlink_callback *cb,
		      const struct inet_diag_req_v2 *req,
		      u16 nlmsg_flags, bool net_admin)
{
	const struct tcp_congestion_ops *ca_ops;
	const struct inet_diag_handler *handler;
	struct inet_diag_dump_data *cb_data;
	int ext = req->idiag_ext;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	void *info = NULL;

	cb_data = cb->data;
	handler = inet_diag_table[inet_diag_get_protocol(req, cb_data)];
	BUG_ON(!handler);

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;
	r->idiag_expires = 0;

	if (inet_diag_msg_attrs_fill(sk, skb, r, ext,
				     sk_user_ns(NETLINK_CB(cb->skb).sk),
				     net_admin))
		goto errout;

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = READ_ONCE(sk->sk_wmem_queued),
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	/*
	 * RAW sockets might have user-defined protocols assigned,
	 * so report the one supplied on socket creation.
	 */
	if (sk->sk_type == SOCK_RAW) {
		if (nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))
			goto errout;
	}

	if (!icsk) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

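	/* Report the socket's pending timer to userspace.  The idiag_timer
	 * codes used here are: 1 = retransmit (including reordering and
	 * loss-probe timers), 2 = timer on sk_timer (the keepalive timer for
	 * TCP), 4 = zero-window probe.  Code 3 is reserved for TIME_WAIT
	 * sockets and is set in inet_twsk_diag_fill() instead.
	 */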
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires =
			jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires =
			jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires =
			jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
	}

	if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
					 handler->idiag_info_size,
					 INET_DIAG_PAD);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if (ext & (1 << (INET_DIAG_CONG - 1))) {
		int err = 0;

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops)
			err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
		rcu_read_unlock();
		if (err < 0)
			goto errout;
	}

	handler->idiag_get_info(sk, r, info);

	if (ext & (1 << (INET_DIAG_INFO - 1)) && handler->idiag_get_aux)
		if (handler->idiag_get_aux(sk, net_admin, skb) < 0)
			goto errout;

	if (sk->sk_state < TCP_TIME_WAIT) {
		union tcp_cc_info info;
		size_t sz = 0;
		int attr;

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops && ca_ops->get_info)
			sz = ca_ops->get_info(sk, ext, &attr, &info);
		rcu_read_unlock();
		if (sz && nla_put(skb, attr, sz, &info) < 0)
			goto errout;
	}

	/* Keep it at the end for potential retry with a larger skb,
	 * or else do best-effort fitting, which is only done for the
	 * first_nlmsg.
	 */
	if (cb_data->bpf_stg_diag) {
		bool first_nlmsg = ((unsigned char *)nlh == skb->data);
		unsigned int prev_min_dump_alloc;
		unsigned int total_nla_size = 0;
		unsigned int msg_len;
		int err;

		msg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
		err = bpf_sk_storage_diag_put(cb_data->bpf_stg_diag, sk, skb,
					      INET_DIAG_SK_BPF_STORAGES,
					      &total_nla_size);

		if (!err)
			goto out;

		total_nla_size += msg_len;
		prev_min_dump_alloc = cb->min_dump_alloc;
		if (total_nla_size > prev_min_dump_alloc)
			cb->min_dump_alloc = min_t(u32, total_nla_size,
						   MAX_DUMP_ALLOC_SIZE);

		if (!first_nlmsg)
			goto errout;

		if (cb->min_dump_alloc > prev_min_dump_alloc)
			/* Retry with pskb_expand_head() with
			 * __GFP_DIRECT_RECLAIM
			 */
			goto errout;

		WARN_ON_ONCE(total_nla_size <= prev_min_dump_alloc);

		/* Send what we have for this sk
		 * and move on to the next sk in the following
		 * dump()
		 */
	}

out:
	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

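/* Reply for a TIME_WAIT socket.  A timewait socket is not a full socket, so
 * only the common fields and the remaining timewait timer are reported;
 * uid/inode are not available and are reported as zero.
 */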
static int inet_twsk_diag_fill(struct sock *sk,
			       struct sk_buff *skb,
			       struct netlink_callback *cb,
			       u16 nlmsg_flags)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type,
			sizeof(*r), nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	inet_diag_msg_common_fill(r, sk);
	r->idiag_retrans = 0;

	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	tmo = tw->tw_timer.expires - jiffies;
	r->idiag_expires = jiffies_delta_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

	nlmsg_end(skb, nlh);
	return 0;
}

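/* Reply for a request socket (TCP_NEW_SYN_RECV): reported to userspace with
 * state SYN_RECV and the SYN-ACK retransmit timer as its pending timer.
 */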
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      u16 nlmsg_flags, bool net_admin)
{
	struct request_sock *reqsk = inet_reqsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = reqsk->num_retrans;

	BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
		     offsetof(struct sock, sk_cookie));

	tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
	r->idiag_expires = jiffies_delta_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
				     inet_rsk(reqsk)->ir_mark)) {
		nlmsg_cancel(skb, nlh);
		return -EMSGSIZE;
	}

	nlmsg_end(skb, nlh);
	return 0;
}

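/* Dispatch to the appropriate fill routine for the kind of socket at hand:
 * timewait, request or full socket.
 */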
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct netlink_callback *cb,
			const struct inet_diag_req_v2 *r,
			u16 nlmsg_flags, bool net_admin)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags);

	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);

	return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags,
				 net_admin);
}

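/* Look up one connection from the addresses, ports and ifindex in @req and
 * verify the socket cookie.  IPv4-mapped IPv6 requests fall back to an IPv4
 * hash lookup.  Returns the socket with a reference held, or an ERR_PTR();
 * callers release the reference with sock_gen_put().
 */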
struct sock *inet_diag_find_one_icsk(struct net *net,
				     struct inet_hashinfo *hashinfo,
				     const struct inet_diag_req_v2 *req)
{
	struct sock *sk;

	rcu_read_lock();
	if (req->sdiag_family == AF_INET)
		sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[3],
					 req->id.idiag_dport, req->id.idiag_src[3],
					 req->id.idiag_sport, req->id.idiag_if);
		else
			sk = inet6_lookup(net, hashinfo, NULL, 0,
					  (struct in6_addr *)req->id.idiag_dst,
					  req->id.idiag_dport,
					  (struct in6_addr *)req->id.idiag_src,
					  req->id.idiag_sport,
					  req->id.idiag_if);
	}
#endif
	else {
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}
	rcu_read_unlock();
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_gen_put(sk);
		return ERR_PTR(-ENOENT);
	}

	return sk;
}
EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);

int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
			    struct netlink_callback *cb,
			    const struct inet_diag_req_v2 *req)
{
	struct sk_buff *in_skb = cb->skb;
	bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN);
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	sk = inet_diag_find_one_icsk(net, hashinfo, req);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	rep = nlmsg_new(inet_sk_attr_size(sk, req, net_admin), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, cb, req, 0, net_admin);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk)
		sock_gen_put(sk);

	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

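/* Handle a request targeting exactly one socket: either a
 * SOCK_DIAG_BY_FAMILY "get", answered through the handler's dump_one(), or a
 * SOCK_DESTROY if the handler supports it.
 */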
static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       int hdrlen,
			       const struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	struct inet_diag_dump_data dump_data;
	int err, protocol;

	memset(&dump_data, 0, sizeof(dump_data));
	err = inet_diag_parse_attrs(nlh, hdrlen, dump_data.req_nlas);
	if (err)
		return err;

	protocol = inet_diag_get_protocol(req, &dump_data);

	handler = inet_diag_lock_handler(protocol);
	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
	} else if (cmd == SOCK_DIAG_BY_FAMILY) {
		struct netlink_callback cb = {
			.nlh = nlh,
			.skb = in_skb,
			.data = &dump_data,
		};
		err = handler->dump_one(&cb, req);
	} else if (cmd == SOCK_DESTROY && handler->destroy) {
		err = handler->destroy(in_skb, req);
	} else {
		err = -EOPNOTSUPP;
	}
	inet_diag_unlock_handler(handler);

	return err;
}

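/* Prefix match on an array of big-endian 32-bit words: succeeds if the first
 * @bits bits of @a1 and @a2 are equal.  For example, a /64 IPv6 prefix
 * compares the first two words exactly, while a /20 IPv4 prefix compares
 * only the top 20 bits of the single word.
 */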
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}

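/* Run the user-supplied filter "bytecode" against one socket.  The program
 * is a packed array of inet_diag_bc_op: each op evaluates one condition and
 * then advances by op->yes bytes if it matched or by op->no bytes if it did
 * not.  Falling exactly off the end (len == 0) accepts the socket; jumping
 * past the end via a 'no' branch rejects it.  The bytecode layout is
 * validated up front (see inet_diag_bc_audit()), so no bounds checks are
 * needed here.
 */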
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_EQ:
			yes = entry->sport == op[1].no;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_EQ:
			yes = entry->dport == op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			const struct inet_diag_hostcond *cond;
			const __be32 *addr;

			cond = (const struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					   entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (cond->family != AF_UNSPEC &&
			    cond->family != entry->family) {
				if (entry->family == AF_INET6 &&
				    cond->family == AF_INET) {
					if (addr[0] == 0 && addr[1] == 0 &&
					    addr[2] == htonl(0xffff) &&
					    bitstring_match(addr + 3,
							    cond->addr,
							    cond->prefix_len))
						break;
				}
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;
			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			yes = 0;
			break;
		}
		case INET_DIAG_BC_DEV_COND: {
			u32 ifindex;

			ifindex = *((const u32 *)(op + 1));
			if (ifindex != entry->ifindex)
				yes = 0;
			break;
		}
		case INET_DIAG_BC_MARK_COND: {
			struct inet_diag_markcond *cond;

			cond = (struct inet_diag_markcond *)(op + 1);
			if ((entry->mark & cond->mask) != cond->mark)
				yes = 0;
			break;
		}
#ifdef CONFIG_SOCK_CGROUP_DATA
		case INET_DIAG_BC_CGROUP_COND: {
			u64 cgroup_id;

			cgroup_id = get_unaligned((const u64 *)(op + 1));
			if (cgroup_id != entry->cgroup_id)
				yes = 0;
			break;
		}
#endif
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}

/* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV)
 */
static void entry_fill_addrs(struct inet_diag_entry *entry,
			     const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
		entry->daddr = sk->sk_v6_daddr.s6_addr32;
	} else
#endif
	{
		entry->saddr = &sk->sk_rcv_saddr;
		entry->daddr = &sk->sk_daddr;
	}
}

int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_entry entry;

	if (!bc)
		return 1;

	entry.family = sk->sk_family;
	entry_fill_addrs(&entry, sk);
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.ifindex = sk->sk_bound_dev_if;
	entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
	if (sk_fullsock(sk))
		entry.mark = sk->sk_mark;
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
	else
		entry.mark = 0;
#ifdef CONFIG_SOCK_CGROUP_DATA
	entry.cgroup_id = sk_fullsock(sk) ?
		cgroup_id(sock_cgroup_ptr(&sk->sk_cgrp_data)) : 0;
#endif

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);

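/* Bytecode-audit helper: checks that a conditional jump lands on an
 * instruction boundary, by walking forward through 'yes' offsets until the
 * remaining length matches @cc.
 */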
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) static int valid_cc(const void *bc, int len, int cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) while (len >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) const struct inet_diag_bc_op *op = bc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (cc > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (cc == len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (op->yes < 4 || op->yes & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) len -= op->yes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) bc += op->yes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /* data is u32 ifindex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) static bool valid_devcond(const struct inet_diag_bc_op *op, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) int *min_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /* Check ifindex space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) *min_len += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (len < *min_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }

/* Validate an inet_diag_hostcond. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) int *min_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct inet_diag_hostcond *cond;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int addr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* Check hostcond space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) *min_len += sizeof(struct inet_diag_hostcond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (len < *min_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) cond = (struct inet_diag_hostcond *)(op + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* Check address family and address length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) switch (cond->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) case AF_UNSPEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) addr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) addr_len = sizeof(struct in_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) addr_len = sizeof(struct in6_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) *min_len += addr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (len < *min_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* Check prefix length (in bits) vs address length (in bytes). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (cond->prefix_len > 8 * addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* Validate a port comparison operator. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) static bool valid_port_comparison(const struct inet_diag_bc_op *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) int len, int *min_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /* Port comparisons put the port in a follow-on inet_diag_bc_op. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) *min_len += sizeof(struct inet_diag_bc_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (len < *min_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
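/* The mark condition carries a struct inet_diag_markcond (mark and mask)
 * immediately after the op.
 */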
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static bool valid_markcond(const struct inet_diag_bc_op *op, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) int *min_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) *min_len += sizeof(struct inet_diag_markcond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return len >= *min_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) #ifdef CONFIG_SOCK_CGROUP_DATA
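/* The cgroup condition carries a u64 cgroup id immediately after the op. */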
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static bool valid_cgroupcond(const struct inet_diag_bc_op *op, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) int *min_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) *min_len += sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return len >= *min_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
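/* Validate a complete filter program before it is accepted from userspace:
 * every op must carry its full payload, and both jump offsets must be
 * 4-byte aligned and land on instruction boundaries.  Mark conditions are
 * only allowed for CAP_NET_ADMIN callers.
 */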
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) static int inet_diag_bc_audit(const struct nlattr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) const void *bytecode, *bc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) int bytecode_len, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (!attr || nla_len(attr) < sizeof(struct inet_diag_bc_op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) bytecode = bc = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) len = bytecode_len = nla_len(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int min_len = sizeof(struct inet_diag_bc_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) const struct inet_diag_bc_op *op = bc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) switch (op->code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) case INET_DIAG_BC_S_COND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) case INET_DIAG_BC_D_COND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (!valid_hostcond(bc, len, &min_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) case INET_DIAG_BC_DEV_COND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (!valid_devcond(bc, len, &min_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) case INET_DIAG_BC_S_EQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) case INET_DIAG_BC_S_GE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) case INET_DIAG_BC_S_LE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) case INET_DIAG_BC_D_EQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) case INET_DIAG_BC_D_GE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) case INET_DIAG_BC_D_LE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (!valid_port_comparison(bc, len, &min_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) case INET_DIAG_BC_MARK_COND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (!net_admin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (!valid_markcond(bc, len, &min_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) #ifdef CONFIG_SOCK_CGROUP_DATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) case INET_DIAG_BC_CGROUP_COND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (!valid_cgroupcond(bc, len, &min_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) case INET_DIAG_BC_AUTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) case INET_DIAG_BC_JMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) case INET_DIAG_BC_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
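		/* For everything but NOP the "no" jump must skip at least
		 * this op and its payload, stay 4-byte aligned and land on an
		 * instruction boundary; an offset of len + 4 points just past
		 * the end of the program and rejects the socket.
		 */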
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (op->code != INET_DIAG_BC_NOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (op->no < min_len || op->no > len + 4 || op->no & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (op->no < len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) !valid_cc(bytecode, bytecode_len, len - op->no))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) bc += op->yes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) len -= op->yes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return len == 0 ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
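/* Compile-time checks that the inet_timewait_sock fields used by the entry
 * filling code sit at the same offsets as their sock/inet_sock counterparts,
 * so TIME_WAIT sockets can be filtered without a separate code path.
 */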
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static void twsk_build_assert(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) offsetof(struct sock, sk_family));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) offsetof(struct inet_sock, inet_num));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) offsetof(struct inet_sock, inet_dport));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) offsetof(struct inet_sock, inet_rcv_saddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) offsetof(struct inet_sock, inet_daddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) offsetof(struct sock, sk_v6_rcv_saddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) offsetof(struct sock, sk_v6_daddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
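/* Dump connection-oriented sockets: walk the listening hash first (unless the
 * request excludes LISTEN or filters on a remote port), then the established
 * hash.  cb->args[0] records which table we are in, cb->args[1] the bucket
 * and cb->args[2] the position within it, so the dump can resume across
 * multiple netlink messages.
 */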
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct netlink_callback *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) const struct inet_diag_req_v2 *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct inet_diag_dump_data *cb_data = cb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) u32 idiag_states = r->idiag_states;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) int i, num, s_i, s_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct nlattr *bc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) bc = cb_data->inet_diag_nla_bc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (idiag_states & TCPF_SYN_RECV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) idiag_states |= TCPF_NEW_SYN_RECV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) s_i = cb->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) s_num = num = cb->args[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (cb->args[0] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (!(idiag_states & TCPF_LISTEN) || r->id.idiag_dport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) goto skip_listen_ht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct inet_listen_hashbucket *ilb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct hlist_nulls_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ilb = &hashinfo->listening_hash[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) spin_lock(&ilb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) sk_nulls_for_each(sk, node, &ilb->nulls_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) struct inet_sock *inet = inet_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (!net_eq(sock_net(sk), net))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (num < s_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (r->sdiag_family != AF_UNSPEC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) sk->sk_family != r->sdiag_family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) goto next_listen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (r->id.idiag_sport != inet->inet_sport &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) r->id.idiag_sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) goto next_listen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!inet_diag_bc_sk(bc, sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) goto next_listen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (inet_sk_diag_fill(sk, inet_csk(sk), skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) cb, r, NLM_F_MULTI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) net_admin) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) spin_unlock(&ilb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) next_listen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ++num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) spin_unlock(&ilb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) s_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) skip_listen_ht:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) cb->args[0] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) s_i = num = s_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (!(idiag_states & ~TCPF_LISTEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
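/* Collect up to SKARR_SZ sockets per pass while holding the bucket lock,
 * then drop the lock before filling the netlink replies.
 */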
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) #define SKARR_SZ 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) for (i = s_i; i <= hashinfo->ehash_mask; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct inet_ehash_bucket *head = &hashinfo->ehash[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct hlist_nulls_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct sock *sk_arr[SKARR_SZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) int num_arr[SKARR_SZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) int idx, accum, res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (hlist_nulls_empty(&head->chain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (i > s_i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) s_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) next_chunk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) accum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) spin_lock_bh(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) sk_nulls_for_each(sk, node, &head->chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (!net_eq(sock_net(sk), net))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (num < s_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) goto next_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) state = (sk->sk_state == TCP_TIME_WAIT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) inet_twsk(sk)->tw_substate : sk->sk_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (!(idiag_states & (1 << state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) goto next_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (r->sdiag_family != AF_UNSPEC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) sk->sk_family != r->sdiag_family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) goto next_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (r->id.idiag_sport != htons(sk->sk_num) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) r->id.idiag_sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) goto next_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (r->id.idiag_dport != sk->sk_dport &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) r->id.idiag_dport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) goto next_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) twsk_build_assert();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (!inet_diag_bc_sk(bc, sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) goto next_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (!refcount_inc_not_zero(&sk->sk_refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) goto next_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) num_arr[accum] = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) sk_arr[accum] = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (++accum == SKARR_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) next_normal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) ++num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) spin_unlock_bh(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) for (idx = 0; idx < accum; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (res >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) res = sk_diag_fill(sk_arr[idx], skb, cb, r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) NLM_F_MULTI, net_admin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) num = num_arr[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) sock_gen_put(sk_arr[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (accum == SKARR_SZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) s_num = num + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) goto next_chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) cb->args[1] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) cb->args[2] = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
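/* Hand the dump off to the protocol handler.  If nothing could be written
 * because a single socket's reply did not fit, grow the skb to the size the
 * handler asked for and try again.
 */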
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) const struct inet_diag_req_v2 *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct inet_diag_dump_data *cb_data = cb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) const struct inet_diag_handler *handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) u32 prev_min_dump_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) int protocol, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) protocol = inet_diag_get_protocol(r, cb_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) prev_min_dump_alloc = cb->min_dump_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) handler = inet_diag_lock_handler(protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (!IS_ERR(handler))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) handler->dump(skb, cb, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) err = PTR_ERR(handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) inet_diag_unlock_handler(handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
	/* The skb is not large enough to fit one sk info and
	 * inet_sk_diag_fill() has requested a larger skb.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (!skb->len && cb->min_dump_alloc > prev_min_dump_alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) err = pskb_expand_head(skb, 0, cb->min_dump_alloc, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return err ? : skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
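/* Parse the request attributes once when the dump starts, audit any filter
 * bytecode, and set up optional bpf_sk_storage collection.  The parsed state
 * is stashed in cb->data for the per-message dump callbacks.
 */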
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) const struct nlmsghdr *nlh = cb->nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) struct inet_diag_dump_data *cb_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) struct sk_buff *skb = cb->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct nlattr *nla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (!cb_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) err = inet_diag_parse_attrs(nlh, hdrlen, cb_data->req_nlas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) kfree(cb_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) nla = cb_data->inet_diag_nla_bc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (nla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) err = inet_diag_bc_audit(nla, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) kfree(cb_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) nla = cb_data->inet_diag_nla_bpf_stgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (nla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct bpf_sk_storage_diag *bpf_stg_diag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) bpf_stg_diag = bpf_sk_storage_diag_alloc(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (IS_ERR(bpf_stg_diag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) kfree(cb_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return PTR_ERR(bpf_stg_diag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) cb_data->bpf_stg_diag = bpf_stg_diag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) cb->data = cb_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static int inet_diag_dump_start(struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req_v2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static int inet_diag_dump_start_compat(struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static int inet_diag_dump_done(struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct inet_diag_dump_data *cb_data = cb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) bpf_sk_storage_diag_free(cb_data->bpf_stg_diag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) kfree(cb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
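/* Map the legacy TCPDIAG/DCCPDIAG netlink message types onto IP protocol
 * numbers for the compat request path.
 */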
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static int inet_diag_type2proto(int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) case TCPDIAG_GETSOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return IPPROTO_TCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) case DCCPDIAG_GETSOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return IPPROTO_DCCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
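/* Compat paths: translate the old struct inet_diag_req into an
 * inet_diag_req_v2 and reuse the common dump / exact-lookup code.
 */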
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static int inet_diag_dump_compat(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct inet_diag_req *rc = nlmsg_data(cb->nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct inet_diag_req_v2 req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) req.sdiag_family = AF_UNSPEC; /* compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) req.idiag_ext = rc->idiag_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) req.idiag_states = rc->idiag_states;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) req.id = rc->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return __inet_diag_dump(skb, cb, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) const struct nlmsghdr *nlh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) struct inet_diag_req *rc = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) struct inet_diag_req_v2 req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) req.sdiag_family = rc->idiag_family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) req.idiag_ext = rc->idiag_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) req.idiag_states = rc->idiag_states;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) req.id = rc->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) sizeof(struct inet_diag_req), &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) int hdrlen = sizeof(struct inet_diag_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) nlmsg_len(nlh) < hdrlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (nlh->nlmsg_flags & NLM_F_DUMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct netlink_dump_control c = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) .start = inet_diag_dump_start_compat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) .done = inet_diag_dump_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) .dump = inet_diag_dump_compat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return inet_diag_get_exact_compat(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int hdrlen = sizeof(struct inet_diag_req_v2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (nlmsg_len(h) < hdrlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) h->nlmsg_flags & NLM_F_DUMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) struct netlink_dump_control c = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) .start = inet_diag_dump_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .done = inet_diag_dump_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) .dump = inet_diag_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return netlink_dump_start(net->diag_nlsk, skb, h, &c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return inet_diag_cmd_exact(h->nlmsg_type, skb, h, hdrlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) nlmsg_data(h));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
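/* sock_diag get_info callback: emit a minimal inet_diag_msg for @sk plus the
 * protocol number and, when the handler provides one, its INET_DIAG_INFO
 * payload.
 */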
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) const struct inet_diag_handler *handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) struct nlattr *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct inet_diag_msg *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) void *info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) nlh = nlmsg_put(skb, 0, 0, SOCK_DIAG_BY_FAMILY, sizeof(*r), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (!nlh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) r = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) memset(r, 0, sizeof(*r));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) inet_diag_msg_common_fill(r, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_STREAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) r->id.idiag_sport = inet_sk(sk)->inet_sport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) r->idiag_state = sk->sk_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
	err = nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) handler = inet_diag_lock_handler(sk->sk_protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (IS_ERR(handler)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) inet_diag_unlock_handler(handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) nlmsg_cancel(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) return PTR_ERR(handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) attr = handler->idiag_info_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) ? nla_reserve_64bit(skb, INET_DIAG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) handler->idiag_info_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) INET_DIAG_PAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) info = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) handler->idiag_get_info(sk, r, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) inet_diag_unlock_handler(handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) nlmsg_end(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) static const struct sock_diag_handler inet_diag_handler = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) .family = AF_INET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .dump = inet_diag_handler_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) .get_info = inet_diag_handler_get_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) .destroy = inet_diag_handler_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static const struct sock_diag_handler inet6_diag_handler = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) .family = AF_INET6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) .dump = inet_diag_handler_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) .get_info = inet_diag_handler_get_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) .destroy = inet_diag_handler_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
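/* Protocol modules (such as the TCP and UDP diag modules) register their
 * handlers here, indexed by IP protocol number.
 */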
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) int inet_diag_register(const struct inet_diag_handler *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) const __u16 type = h->idiag_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (type >= IPPROTO_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) mutex_lock(&inet_diag_table_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (!inet_diag_table[type]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) inet_diag_table[type] = h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) mutex_unlock(&inet_diag_table_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) EXPORT_SYMBOL_GPL(inet_diag_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) void inet_diag_unregister(const struct inet_diag_handler *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) const __u16 type = h->idiag_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (type >= IPPROTO_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) mutex_lock(&inet_diag_table_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) inet_diag_table[type] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) mutex_unlock(&inet_diag_table_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) EXPORT_SYMBOL_GPL(inet_diag_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
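/* Allocate the handler table and register the AF_INET/AF_INET6 sock_diag
 * handlers plus the pre-sock_diag compat receive path.
 */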
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) static int __init inet_diag_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) const int inet_diag_table_size = (IPPROTO_MAX *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) sizeof(struct inet_diag_handler *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (!inet_diag_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) err = sock_diag_register(&inet_diag_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) goto out_free_nl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) err = sock_diag_register(&inet6_diag_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) goto out_free_inet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) out_free_inet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) sock_diag_unregister(&inet_diag_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) out_free_nl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) kfree(inet_diag_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static void __exit inet_diag_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) sock_diag_unregister(&inet6_diag_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) sock_diag_unregister(&inet_diag_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) kfree(inet_diag_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) module_init(inet_diag_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) module_exit(inet_diag_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);