^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Shared Memory Communications over RDMA (SMC-R) and RoCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Monitoring SMC transport protocol sockets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright IBM Corp. 2016
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/sock_diag.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/inet_diag.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/smc_diag.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <net/netlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <net/smc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "smc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "smc_core.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
/* Per-dump resume cursor, stored in the netlink callback scratch area
 * (cb->ctx) between successive dump invocations.  One position per
 * protocol: indexed by SMCPROTO_SMC / SMCPROTO_SMC6 (see
 * smc_diag_dump_proto(), which reads and writes pos[p_type]).
 */
struct smc_diag_dump_ctx {
	int pos[2];	/* number of sockets already dumped per proto */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
/* Reinterpret the generic netlink callback context as our dump cursor.
 * NOTE(review): assumes sizeof(struct smc_diag_dump_ctx) fits in
 * cb->ctx — true for two ints, but verify if the struct grows.
 */
static struct smc_diag_dump_ctx *smc_dump_context(struct netlink_callback *cb)
{
	return (struct smc_diag_dump_ctx *)cb->ctx;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) be16_to_cpu(((__be16 *)gid_raw)[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) be16_to_cpu(((__be16 *)gid_raw)[1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) be16_to_cpu(((__be16 *)gid_raw)[2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) be16_to_cpu(((__be16 *)gid_raw)[3]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) be16_to_cpu(((__be16 *)gid_raw)[4]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) be16_to_cpu(((__be16 *)gid_raw)[5]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) be16_to_cpu(((__be16 *)gid_raw)[6]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) be16_to_cpu(((__be16 *)gid_raw)[7]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
/* Fill the address/identity part of a diag record for @sk.
 *
 * The SMC socket's addressing lives on the internal TCP socket
 * (smc->clcsock), so everything below the family/cookie is taken from
 * there.  If the clcsock is gone (e.g. socket not fully set up or torn
 * down), only family and cookie are reported; the rest stays zeroed by
 * the memset.
 */
static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	memset(r, 0, sizeof(*r));
	r->diag_family = sk->sk_family;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);
	if (!smc->clcsock)
		return;
	/* sk_num is host byte order, diag wants network order */
	r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
	r->id.idiag_dport = smc->clcsock->sk->sk_dport;
	r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
	if (sk->sk_protocol == SMCPROTO_SMC) {
		/* IPv4: single 32-bit word per address */
		r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
		r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (sk->sk_protocol == SMCPROTO_SMC6) {
		/* IPv6: copy full 128-bit addresses */
		memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
		       sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
		memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
		       sizeof(smc->clcsock->sk->sk_v6_daddr));
#endif
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) struct smc_diag_msg *r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) struct user_namespace *user_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) r->diag_inode = sock_i_ino(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
/* Emit one complete diag record (nlmsg + nested attributes) for @sk.
 *
 * Always emits the smc_diag_msg header and an SMC_DIAG_FALLBACK
 * attribute.  Depending on req->diag_ext and the connection mode it
 * additionally emits SMC_DIAG_CONNINFO (active SMC connection),
 * SMC_DIAG_LGRINFO (SMC-R link group) or SMC_DIAG_DMBINFO (SMC-D).
 *
 * @bc is accepted for interface symmetry with inet_diag but unused —
 * no bytecode filtering is implemented here.
 *
 * Returns 0 on success; -EMSGSIZE if @skb ran out of room, in which
 * case the partially built message is cancelled.
 *
 * NOTE(review): smc->conn.lgr and lnk[0].smcibdev are dereferenced
 * without any visible lock in this chunk; presumably the dump path
 * guarantees their lifetime — confirm against smc_core locking rules.
 */
static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   const struct smc_diag_req *req,
			   struct nlattr *bc)
{
	struct smc_sock *smc = smc_sk(sk);
	struct smc_diag_fallback fallback;
	struct user_namespace *user_ns;
	struct smc_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	smc_diag_msg_common_fill(r, sk);
	r->diag_state = sk->sk_state;
	/* mode: fallback-to-TCP beats everything; otherwise SMC-D vs SMC-R
	 * is decided by the link group type (no lgr yet reads as SMC-R)
	 */
	if (smc->use_fallback)
		r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP;
	else if (smc->conn.lgr && smc->conn.lgr->is_smcd)
		r->diag_mode = SMC_DIAG_MODE_SMCD;
	else
		r->diag_mode = SMC_DIAG_MODE_SMCR;
	/* translate uid into the namespace of the requesting socket */
	user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
	if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
		goto errout;

	/* fallback reason/diagnosis is reported unconditionally */
	fallback.reason = smc->fallback_rsn;
	fallback.peer_diagnosis = smc->peer_diagnosis;
	if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0)
		goto errout;

	/* connection details only for sockets with an active SMC
	 * connection (non-zero alert token)
	 */
	if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
	    smc->conn.alert_token_local) {
		struct smc_connection *conn = &smc->conn;
		struct smc_diag_conninfo cinfo = {
			.token = conn->alert_token_local,
			.sndbuf_size = conn->sndbuf_desc ?
				conn->sndbuf_desc->len : 0,
			.rmbe_size = conn->rmb_desc ? conn->rmb_desc->len : 0,
			.peer_rmbe_size = conn->peer_rmbe_size,

			.rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,
			.rx_prod.count = conn->local_rx_ctrl.prod.count,
			.rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,
			.rx_cons.count = conn->local_rx_ctrl.cons.count,

			.tx_prod.wrap = conn->local_tx_ctrl.prod.wrap,
			.tx_prod.count = conn->local_tx_ctrl.prod.count,
			.tx_cons.wrap = conn->local_tx_ctrl.cons.wrap,
			.tx_cons.count = conn->local_tx_ctrl.cons.count,

			/* flag structs are exported as raw bytes */
			.tx_prod_flags =
				*(u8 *)&conn->local_tx_ctrl.prod_flags,
			.tx_conn_state_flags =
				*(u8 *)&conn->local_tx_ctrl.conn_state_flags,
			.rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags,
			.rx_conn_state_flags =
				*(u8 *)&conn->local_rx_ctrl.conn_state_flags,

			.tx_prep.wrap = conn->tx_curs_prep.wrap,
			.tx_prep.count = conn->tx_curs_prep.count,
			.tx_sent.wrap = conn->tx_curs_sent.wrap,
			.tx_sent.count = conn->tx_curs_sent.count,
			.tx_fin.wrap = conn->tx_curs_fin.wrap,
			.tx_fin.count = conn->tx_curs_fin.count,
		};

		if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
			goto errout;
	}

	/* SMC-R link group info; empty lgr->list means the group is
	 * already being torn down, so skip it then
	 */
	if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_diag_lgrinfo linfo = {
			.role = smc->conn.lgr->role,
			.lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
			.lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
		};

		memcpy(linfo.lnk[0].ibname,
		       smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
		       sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
		smc_gid_be16_convert(linfo.lnk[0].gid,
				     smc->conn.lgr->lnk[0].gid);
		smc_gid_be16_convert(linfo.lnk[0].peer_gid,
				     smc->conn.lgr->lnk[0].peer_gid);

		if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
			goto errout;
	}
	/* SMC-D DMB info, guarded the same way as LGRINFO above */
	if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_connection *conn = &smc->conn;
		struct smcd_diag_dmbinfo dinfo;

		memset(&dinfo, 0, sizeof(dinfo));

		dinfo.linkid = *((u32 *)conn->lgr->id);
		dinfo.peer_gid = conn->lgr->peer_gid;
		dinfo.my_gid = conn->lgr->smcd->local_gid;
		dinfo.token = conn->rmb_desc->token;
		dinfo.peer_token = conn->peer_token;

		if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
			goto errout;
	}

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
/* Dump all sockets of one SMC protocol (@p_type selects the cursor
 * slot: SMCPROTO_SMC or SMCPROTO_SMC6) from @prot's hash table into
 * @skb, skipping sockets already emitted in earlier dump rounds.
 *
 * The whole walk runs under the smc_hash read lock.  On -EMSGSIZE from
 * __smc_diag_dump() the walk stops and the cursor records how far we
 * got, so the next netlink dump call resumes after the last emitted
 * socket.  Returns 0 when the table was exhausted, or the negative
 * error that stopped the walk.
 *
 * NOTE(review): num counts all sockets in this net, including skipped
 * ones, so resume positions stay stable only while the hash list does
 * not change between dump rounds — presumably acceptable for diag
 * (same best-effort semantics as inet_diag); confirm.
 */
static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
			       struct netlink_callback *cb, int p_type)
{
	struct smc_diag_dump_ctx *cb_ctx = smc_dump_context(cb);
	struct net *net = sock_net(skb->sk);
	int snum = cb_ctx->pos[p_type];
	struct nlattr *bc = NULL;
	struct hlist_head *head;
	int rc = 0, num = 0;
	struct sock *sk;

	read_lock(&prot->h.smc_hash->lock);
	head = &prot->h.smc_hash->ht;
	if (hlist_empty(head))
		goto out;

	sk_for_each(sk, head) {
		if (!net_eq(sock_net(sk), net))
			continue;
		/* skip sockets already dumped in a previous round */
		if (num < snum)
			goto next;
		rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
		if (rc < 0)
			goto out;
next:
		num++;
	}

out:
	read_unlock(&prot->h.smc_hash->lock);
	cb_ctx->pos[p_type] = num;
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) rc = smc_diag_dump_proto(&smc_proto, skb, cb, SMCPROTO_SMC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) smc_diag_dump_proto(&smc_proto6, skb, cb, SMCPROTO_SMC6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) h->nlmsg_flags & NLM_F_DUMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) struct netlink_dump_control c = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) .dump = smc_diag_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) .min_dump_alloc = SKB_WITH_OVERHEAD(32768),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) return netlink_dump_start(net->diag_nlsk, skb, h, &c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
/* sock_diag registration: route AF_SMC diag requests to our handler */
static const struct sock_diag_handler smc_diag_handler = {
	.family = AF_SMC,
	.dump = smc_diag_handler_dump,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
/* Module init: register the AF_SMC sock_diag handler */
static int __init smc_diag_init(void)
{
	return sock_diag_register(&smc_diag_handler);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
/* Module exit: unregister the AF_SMC sock_diag handler */
static void __exit smc_diag_exit(void)
{
	sock_diag_unregister(&smc_diag_handler);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
module_init(smc_diag_init);
module_exit(smc_diag_exit);
MODULE_LICENSE("GPL");
/* auto-load on NETLINK_SOCK_DIAG requests for family 43 (AF_SMC) */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */);