// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

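/* Bump the given MPTCP MIB counter in the netns owning the request socket. */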
static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

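/* Drop the msk reference and the token, if any, held by a subflow request
 * socket, then chain to the plain TCP request destructor.
 */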
static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

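/* HMAC-SHA256 over the two nonces, keyed with both MPTCP keys; the callers
 * use only the leading bytes of the digest for the MP_JOIN handshake.
 */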
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

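/* New subflows are admitted only once the MPTCP connection is fully
 * established and the path manager still accepts additional subflows.
 */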
static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       READ_ONCE(msk->pm.accept_subflow);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
						     const struct sk_buff *skb)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
	return msk;
}

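/* Initialization common to the regular and the syncookie request paths. */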
static int __subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	return 0;
}

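/* Parse the MPTCP options carried by the SYN and set up the request socket
 * for either an MP_CAPABLE or an MP_JOIN handshake.
 */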
static void subflow_init_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int ret;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	ret = __subflow_init_req(req, sk_listener);
	if (ret)
		return;

	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err, retries = 4;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
			} else {
				subflow_req->mp_capable = 1;
			}
			return;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;

	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req, skb);

		if (unlikely(req->syncookie) && subflow_req->msk) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}
}

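/* Initialize a request socket created by the syncookie code, re-building the
 * relevant MPTCP state from the options carried by the cookie ACK.
 */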
int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int err;

	err = __subflow_init_req(req, sk_listener);
	if (err)
		return err;

	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable && mp_opt.mp_join)
		return -EINVAL;

	if (mp_opt.mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		if (mptcp_can_accept_new_subflow(subflow_req->msk))
			subflow_req->mp_join = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static void subflow_v4_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}
#endif

/* validate the truncated hmac received on the SYN-ACK; the hmac for the
 * third ACK is generated by the caller
 */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

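/* Abort the subflow: send an active reset, mark it closed and defer the
 * remaining cleanup to the msk worker.
 */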
void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
	    schedule_work(&mptcp_sk(sk)->work))
		sock_hold(sk);
}

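/* Active side, SYN-ACK received: complete the MP_CAPABLE or MP_JOIN
 * handshake, falling back to plain TCP or resetting the subflow on failure.
 */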
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!mp_opt.mp_capable) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		mptcp_finish_connect(sk);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!mp_opt.mp_join)
			goto do_reset;

		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			goto do_reset;
		}

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
	}
	return;

do_reset:
	mptcp_subflow_reset(sk);
}

struct request_sock_ops mptcp_subflow_request_sock_ops;
EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops);
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if the new mptcp socket isn't accepted, it is freed
	 * from the TCP listener socket's request queue, linked
	 * from req->sk. The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will be in
	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */
	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_destroy_common(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

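/* Detach the MPTCP ULP and revert the socket to plain TCP. */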
static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

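/* Record the peer key and mark both the subflow and the owning msk as fully
 * established.
 */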
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

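/* Passive side child creation: build the child TCP socket and, depending on
 * the handshake type, clone a new msk (MP_CAPABLE), attach the child to an
 * existing msk (MP_JOIN), or fall back to plain TCP.
 */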
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		/* we can receive and accept an in-window, out-of-order pkt,
		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
		 * paths: always try to extract the peer key, and fallback
		 * for packets missing it.
		 * Even OoO DSS packets coming legitimately after dropped or
		 * reordered MPC will cause fallback, but we don't have other
		 * options.
		 */
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			subflow_drop_ctx(child);
			goto out;
		}

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.mp_capable)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

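/* Outcome of DSS mapping validation for the skb at the head of the subflow
 * receive queue.
 */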
enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

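/* Infer the full 64-bit data sequence number from a 32-bit DSS value. */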
static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

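/* True if all the not-yet-consumed data in this skb fits inside the current
 * mapping.
 */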
static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

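/* Check that the current mapping covers the data at the current subflow
 * sequence number.
 */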
static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
			     subflow->map_data_len))) {
		/* Mapping covers only past subflow data, invalid */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	return true;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) static enum mapping_status get_mapping_status(struct sock *ssk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) struct mptcp_sock *msk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) struct mptcp_ext *mpext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) u16 data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) u64 map_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) skb = skb_peek(&ssk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) return MAPPING_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (mptcp_check_fallback(ssk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) return MAPPING_DUMMY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) mpext = mptcp_get_ext(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (!mpext || !mpext->use_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) if (!subflow->map_valid && !skb->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) /* the TCP stack deliver 0 len FIN pkt to the receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * queue, that is the only 0len pkts ever expected here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * and we can admit no mapping only for 0 len pkts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) WARN_ONCE(1, "0len seq %d:%d flags %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) TCP_SKB_CB(skb)->seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) TCP_SKB_CB(skb)->end_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) TCP_SKB_CB(skb)->tcp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) sk_eat_skb(ssk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) return MAPPING_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (!subflow->map_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return MAPPING_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) goto validate_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) mpext->data_len, mpext->data_fin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) data_len = mpext->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (data_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) return MAPPING_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (mpext->data_fin == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (data_len == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) mpext->dsn64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (subflow->map_valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* A DATA_FIN might arrive in a DSS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * option before the previous mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * has been fully consumed. Continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * handling the existing mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) skb_ext_del(skb, SKB_EXT_MPTCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return MAPPING_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if (updated && schedule_work(&msk->work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) sock_hold((struct sock *)msk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return MAPPING_DATA_FIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) u64 data_fin_seq = mpext->data_seq + data_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /* If mpext->data_seq is a 32-bit value, data_fin_seq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * must also be limited to 32 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (!mpext->dsn64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) data_fin_seq &= GENMASK_ULL(31, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) data_fin_seq, mpext->dsn64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /* Adjust for DATA_FIN using 1 byte of sequence space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) data_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (!mpext->dsn64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) mpext->data_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) pr_debug("expanded seq=%llu", subflow->map_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) map_seq = mpext->data_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (subflow->map_valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /* Allow replacing only with an identical map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (subflow->map_seq == map_seq &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) subflow->map_subflow_seq == mpext->subflow_seq &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) subflow->map_data_len == data_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) skb_ext_del(skb, SKB_EXT_MPTCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return MAPPING_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /* If this skb's data is fully covered by the current mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * the new map would need caching, which is not supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (skb_is_fully_mapped(ssk, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return MAPPING_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /* will validate the next map after consuming the current one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return MAPPING_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) subflow->map_seq = map_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) subflow->map_subflow_seq = mpext->subflow_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) subflow->map_data_len = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) subflow->map_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) subflow->mpc_map = mpext->mpc_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) subflow->map_seq, subflow->map_subflow_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) subflow->map_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) validate_seq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* we revalidate the valid mapping on each new skb, because we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * ensure the current skb is completely covered by the available mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (!validate_mapping(ssk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return MAPPING_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) skb_ext_del(skb, SKB_EXT_MPTCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return MAPPING_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
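/* drop data that the MPTCP level has already acked: @limit is the
 * number of bytes at the head of @skb that precede the msk-level
 * ack_seq. A DATA_FIN counts as one extra byte of sequence space,
 * mirroring the TCP FIN.
 */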
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) u64 limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) u32 incr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) incr = limit >= skb->len ? skb->len + fin : limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) pr_debug("discarding=%u len=%u seq=%u", incr, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) subflow->map_subflow_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) tcp_sk(ssk)->copied_seq += incr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) sk_eat_skb(ssk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) subflow->map_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (incr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) tcp_cleanup_rbuf(ssk, incr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
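/* Returns true when the subflow carries data deliverable to the MPTCP
 * level. On success, subflow->data_avail distinguishes in-sequence data
 * (MPTCP_SUBFLOW_DATA_AVAIL) from data mapped beyond the current msk
 * ack_seq (MPTCP_SUBFLOW_OOO_DATA); stale data is silently discarded.
 */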
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static bool subflow_check_data_avail(struct sock *ssk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) enum mapping_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct mptcp_sock *msk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (!skb_peek(&ssk->sk_receive_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) subflow->data_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (subflow->data_avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) msk = mptcp_sk(subflow->conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u64 ack_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) u64 old_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) status = get_mapping_status(ssk, msk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (status == MAPPING_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ssk->sk_err = EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) goto fatal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (status == MAPPING_DUMMY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) __mptcp_do_fallback(msk);
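/* after a fallback no DSS option is expected anymore: synthesize a
 * mapping covering the whole skb so the normal receive path can keep
 * delivering data to the msk
 */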
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) skb = skb_peek(&ssk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) subflow->map_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) subflow->map_seq = READ_ONCE(msk->ack_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) subflow->map_data_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) subflow->ssn_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (status != MAPPING_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) skb = skb_peek(&ssk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (WARN_ON_ONCE(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* if msk lacks the remote key, this subflow must provide an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * MP_CAPABLE-based mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (unlikely(!READ_ONCE(msk->can_ack))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (!subflow->mpc_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ssk->sk_err = EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) goto fatal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
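/* first mapping for this connection: seed the msk-level key and
 * sequence numbers from the MP_CAPABLE subflow
 */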
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) WRITE_ONCE(msk->remote_key, subflow->remote_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) WRITE_ONCE(msk->ack_seq, subflow->map_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) WRITE_ONCE(msk->can_ack, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) old_ack = READ_ONCE(msk->ack_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ack_seq);
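/* three cases: the mapping starts exactly at the msk-level ack
 * (in-sequence data), after it (out-of-order data the msk will
 * buffer), or before it (already acked data to be discarded)
 */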
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (ack_seq == old_ack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) } else if (after64(ack_seq, old_ack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* only accept in-sequence mappings; older values are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * spurious retransmissions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) fatal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* fatal protocol error, close the socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /* This barrier is coupled with smp_rmb() in tcp_poll() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ssk->sk_error_report(ssk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) tcp_set_state(ssk, TCP_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) tcp_send_active_reset(ssk, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) subflow->data_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
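/* called from the subflow data_ready and state_change hooks: retire an
 * exhausted mapping, then check whether new data can be delivered
 */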
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) bool mptcp_subflow_data_available(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* check if current mapping is still valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (subflow->map_valid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) subflow->map_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) subflow->data_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) pr_debug("Done with mapping: seq=%u data_len=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) subflow->map_subflow_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) subflow->map_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return subflow_check_data_avail(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * not the ssk one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * In mptcp, rwin is about the mptcp-level connection data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * Data that is still on the ssk rx queue can thus be ignored:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * as far as the mptcp peer is concerned, that data is still in flight.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * DSS ACK is updated when skb is moved to the mptcp rx queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) */
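/* For illustration (numbers are made up): with an msk-level receive
 * buffer of 256 KiB of which 64 KiB are already queued at the msk,
 * mptcp_space() below reports roughly 192 KiB of space, no matter how
 * much data still sits in this subflow's own receive queue.
 */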
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) void mptcp_space(const struct sock *ssk, int *space, int *full_space)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) const struct sock *sk = subflow->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) *space = tcp_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) *full_space = tcp_full_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) static void subflow_data_ready(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) u16 state = 1 << inet_sk_state_load(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct sock *parent = subflow->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct mptcp_sock *msk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) msk = mptcp_sk(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (state & TCPF_LISTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* MPJ subflows are removed from the accept queue before reaching here;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * avoid stray wakeups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) set_bit(MPTCP_DATA_READY, &msk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) parent->sk_data_ready(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) !subflow->mp_join && !(state & TCPF_CLOSE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (mptcp_subflow_data_available(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) mptcp_data_ready(parent, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static void subflow_write_space(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct sock *parent = subflow->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (!sk_stream_is_writeable(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (sk_stream_is_writeable(parent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) sk_stream_write_space(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static struct inet_connection_sock_af_ops *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) subflow_default_af_ops(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) #if IS_ENABLED(CONFIG_MPTCP_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (sk->sk_family == AF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return &subflow_v6_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return &subflow_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) #if IS_ENABLED(CONFIG_MPTCP_IPV6)
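/* switch an AF_INET6 subflow between the native v6 ops and the "mapped"
 * variant used for v4-mapped-v6 addresses, which transmits plain IPv4
 * packets (see the subflow_v6m_specific setup in mptcp_subflow_init())
 */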
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) struct inet_connection_sock *icsk = inet_csk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct inet_connection_sock_af_ops *target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (likely(icsk->icsk_af_ops == target))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) subflow->icsk_af_ops = icsk->icsk_af_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) icsk->icsk_af_ops = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct sockaddr_storage *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) memset(addr, 0, sizeof(*addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) addr->ss_family = info->family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (addr->ss_family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) in_addr->sin_addr = info->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) in_addr->sin_port = info->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) #if IS_ENABLED(CONFIG_MPTCP_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) else if (addr->ss_family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) in6_addr->sin6_addr = info->addr6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) in6_addr->sin6_port = info->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
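/* create and connect a new subflow for the given msk, bound to the
 * local address @loc and directed at @remote. The connect is issued
 * non-blocking: on the normal path this returns 0 or -EINPROGRESS with
 * the MP_JOIN handshake still in progress; on failure the new socket
 * is released and a negative errno is returned.
 */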
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) const struct mptcp_addr_info *remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct mptcp_sock *msk = mptcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct mptcp_subflow_context *subflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct sockaddr_storage addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int remote_id = remote->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) int local_id = loc->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct socket *sf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct sock *ssk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) u32 remote_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) int addrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (!mptcp_is_fully_established(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) err = mptcp_subflow_create_socket(sk, &sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) ssk = sf->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) subflow = mptcp_subflow_ctx(ssk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) get_random_bytes(&subflow->local_nonce, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) } while (!subflow->local_nonce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!local_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) local_id = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) subflow->remote_key = msk->remote_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) subflow->local_key = msk->local_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) subflow->token = msk->token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) mptcp_info2sockaddr(loc, &addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) addrlen = sizeof(struct sockaddr_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) #if IS_ENABLED(CONFIG_MPTCP_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (loc->family == AF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) addrlen = sizeof(struct sockaddr_in6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) ssk->sk_bound_dev_if = loc->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) remote_token, local_id, remote_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) subflow->remote_token = remote_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) subflow->local_id = local_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) subflow->remote_id = remote_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) subflow->request_join = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) subflow->request_bkup = !!(loc->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) mptcp_info2sockaddr(remote, &addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (err && err != -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) spin_lock_bh(&msk->join_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) list_add_tail(&subflow->node, &msk->join_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) spin_unlock_bh(&msk->join_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) sock_release(sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct mptcp_subflow_context *subflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct socket *sf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /* un-accepted server sockets can reach here; on bad configuration,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * bail out early to avoid greater trouble later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (unlikely(!sk->sk_socket))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) &sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) lock_sock(sf->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* kernel sockets do not acquire the net ref by default, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * TCP timer needs it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) sf->sk->sk_net_refcnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) get_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) this_cpu_add(*net->core.sock_inuse, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) err = tcp_set_ulp(sf->sk, "mptcp");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) release_sock(sf->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) sock_release(sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* the newly created socket really belongs to the owning MPTCP master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * socket, even if for additional subflows the allocation is performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * by a kernel workqueue. Adjust the inode references, so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * procfs/diag interfaces really show this one as belonging to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * correct user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) subflow = mptcp_subflow_ctx(sf->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) pr_debug("subflow=%p", subflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) *new_sock = sf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) sock_hold(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) subflow->conn = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) gfp_t priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct inet_connection_sock *icsk = inet_csk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct mptcp_subflow_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) ctx = kzalloc(sizeof(*ctx), priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
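/* icsk_ulp_data is also read under RCU protection (e.g. by the MPTCP
 * diag code), hence the rcu_assign_pointer() publication
 */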
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) INIT_LIST_HEAD(&ctx->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) pr_debug("subflow=%p", ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) ctx->tcp_sock = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static void __subflow_state_change(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct socket_wq *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) wq = rcu_dereference(sk->sk_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (skwq_has_sleeper(wq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) wake_up_interruptible_all(&wq->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static bool subflow_is_done(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static void subflow_state_change(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct sock *parent = subflow->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) __subflow_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (subflow_simultaneous_connect(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) mptcp_do_fallback(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) mptcp_rcv_space_init(mptcp_sk(parent), sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) pr_fallback(mptcp_sk(parent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) subflow->conn_finished = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) inet_sk_state_store(parent, TCP_ESTABLISHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) parent->sk_state_change(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* as recvmsg() does not acquire the subflow socket for ssk selection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * a FIN packet carrying a DSS mapping can go unnoticed if we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * trigger the data available machinery here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (mptcp_subflow_data_available(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) mptcp_data_ready(parent, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (__mptcp_check_fallback(mptcp_sk(parent)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) !(parent->sk_shutdown & RCV_SHUTDOWN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) !subflow->rx_eof && subflow_is_done(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) subflow->rx_eof = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) mptcp_subflow_eof(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static int subflow_ulp_init(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct inet_connection_sock *icsk = inet_csk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct mptcp_subflow_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* disallow attaching ULP to a socket unless it has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * created with sock_create_kern()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (!sk->sk_kern_sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) ctx = subflow_create_ctx(sk, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (!ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) tp->is_mptcp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) ctx->icsk_af_ops = icsk->icsk_af_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) icsk->icsk_af_ops = subflow_default_af_ops(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) ctx->tcp_data_ready = sk->sk_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) ctx->tcp_state_change = sk->sk_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) ctx->tcp_write_space = sk->sk_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) sk->sk_data_ready = subflow_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) sk->sk_write_space = subflow_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) sk->sk_state_change = subflow_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) static void subflow_ulp_release(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (ctx->conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) sock_put(ctx->conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) kfree_rcu(ctx, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) static void subflow_ulp_clone(const struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct sock *newsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) const gfp_t priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) struct mptcp_subflow_context *new_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (!tcp_rsk(req)->is_mptcp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) (!subflow_req->mp_capable && !subflow_req->mp_join)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) subflow_ulp_fallback(newsk, old_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) new_ctx = subflow_create_ctx(newsk, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (!new_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) subflow_ulp_fallback(newsk, old_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) new_ctx->conn_finished = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) new_ctx->tcp_state_change = old_ctx->tcp_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) new_ctx->tcp_write_space = old_ctx->tcp_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) new_ctx->rel_write_seq = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) new_ctx->tcp_sock = newsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (subflow_req->mp_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /* see the comments in subflow_syn_recv_sock(): the MPTCP connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * is fully established only after we receive the remote key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) new_ctx->mp_capable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) new_ctx->local_key = subflow_req->local_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) new_ctx->token = subflow_req->token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) new_ctx->ssn_offset = subflow_req->ssn_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) new_ctx->idsn = subflow_req->idsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) } else if (subflow_req->mp_join) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) new_ctx->ssn_offset = subflow_req->ssn_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) new_ctx->mp_join = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) new_ctx->fully_established = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) new_ctx->backup = subflow_req->backup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) new_ctx->local_id = subflow_req->local_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) new_ctx->remote_id = subflow_req->remote_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) new_ctx->token = subflow_req->token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) new_ctx->thmac = subflow_req->thmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) .name = "mptcp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) .init = subflow_ulp_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) .release = subflow_ulp_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) .clone = subflow_ulp_clone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static int subflow_ops_init(struct request_sock_ops *subflow_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) subflow_ops->slab_name = "request_sock_subflow";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
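/* subflow request sockets live in the same hash tables as plain TCP
 * ones and are looked up locklessly, so the slab must provide
 * type-stable memory: SLAB_TYPESAFE_BY_RCU, as for the TCP slab
 */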
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) subflow_ops->obj_size, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) SLAB_ACCOUNT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) SLAB_TYPESAFE_BY_RCU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (!subflow_ops->slab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) subflow_ops->destructor = subflow_req_destructor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) void __init mptcp_subflow_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) panic("MPTCP: failed to init subflow request sock ops\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) subflow_specific = ipv4_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) subflow_specific.conn_request = subflow_v4_conn_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) subflow_specific.sk_rx_dst_set = subflow_finish_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) #if IS_ENABLED(CONFIG_MPTCP_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) subflow_v6_specific = ipv6_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) subflow_v6_specific.conn_request = subflow_v6_conn_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
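/* ops for subflows running on v4-mapped-v6 sockets: inherit the v6
 * ops, but transmit plain IPv4 packets on the wire
 */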
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) subflow_v6m_specific = subflow_v6_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) subflow_v6m_specific.send_check = ipv4_specific.send_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) subflow_v6m_specific.net_frag_header_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) mptcp_diag_subflow_init(&subflow_ulp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (tcp_register_ulp(&subflow_ulp_ops) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) panic("MPTCP: failed to register subflows to ULP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }