// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

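/* Install a randomly generated primary key for this netns if no Fast Open
 * key has been configured yet (typically via the net.ipv4.tcp_fastopen_key
 * sysctl). Called lazily, on first use.
 */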
void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}

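/* RCU callback freeing a retired context. kfree_sensitive() zeroes the
 * memory before freeing it, so the old key material does not linger on
 * the kernel heap.
 */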
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
		container_of(head, struct tcp_fastopen_context, rcu);

	kfree_sensitive(ctx);
}

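/* Tear down the per-listener key context, if one was installed via the
 * TCP_FASTOPEN_KEY socket option. Readers may still be inside an RCU
 * section, hence call_rcu() rather than freeing directly.
 */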
void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
		inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

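/* Tear down the per-netns key context on namespace exit. The pointer is
 * cleared under the lock so a concurrent key update cannot race with the
 * teardown; readers drain via the RCU grace period.
 */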
void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

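/* Install a new key context, either per listener (sk != NULL, the
 * TCP_FASTOPEN_KEY socket option path) or per netns (sk == NULL, the
 * net.ipv4.tcp_fastopen_key sysctl and tcp_fastopen_init_key_once()
 * paths). Each key is 16 bytes, consumed as a siphash_key_t; the optional
 * backup key allows key rotation without invalidating cookies that were
 * handed out under the previous primary key.
 */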
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err = 0;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	ctx->key[0].key[0] = get_unaligned_le64(primary_key);
	ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
	if (backup_key) {
		ctx->key[1].key[0] = get_unaligned_le64(backup_key);
		ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
		ctx->num = 2;
	} else {
		ctx->num = 1;
	}

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = rcu_dereference_protected(q->ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(q->ctx, ctx);
	} else {
		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	}
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
	return err;
}

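/* Copy the currently installed keys out to @key (little-endian, mirroring
 * tcp_fastopen_reset_cipher()) and return how many keys are installed.
 * Used to report the keys back to userspace.
 */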
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key)
{
	struct tcp_fastopen_context *ctx;
	int n_keys = 0, i;

	rcu_read_lock();
	if (icsk)
		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
	else
		ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctx) {
		n_keys = tcp_fastopen_context_len(ctx);
		for (i = 0; i < n_keys; i++) {
			put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
			put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
		}
	}
	rcu_read_unlock();

	return n_keys;
}

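/* Compute the cookie for one key: SipHash keyed by the server secret over
 * the client/server address pair. The saddr/daddr fields are contiguous
 * in both the IPv4 and IPv6 headers, so a single hash call covers both
 * addresses. The cookie is a single u64 (TCP_FASTOPEN_COOKIE_SIZE).
 */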
static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
					     struct sk_buff *syn,
					     const siphash_key_t *key,
					     struct tcp_fastopen_cookie *foc)
{
	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
					  sizeof(iph->saddr) +
					  sizeof(iph->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
					  sizeof(ip6h->saddr) +
					  sizeof(ip6h->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#endif
	return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
	rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

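	/* The clone still carries the SYN: advance seq past it and clear
	 * the flag so the skb now represents pure payload (and possibly
	 * a FIN).
	 */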
	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) is not needed here,
	 * as we are certainly not changing the upper 32-bit value (0).
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

/* Returns 0 on no key match, 1 for the primary key, 2 for the backup key.
 * The cookie computed with the primary key is always left in *valid_foc,
 * so on a backup-key match the caller can hand the client the current
 * primary cookie.
 */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
					 struct request_sock *req,
					 struct sk_buff *syn,
					 struct tcp_fastopen_cookie *orig,
					 struct tcp_fastopen_cookie *valid_foc)
{
	struct tcp_fastopen_cookie search_foc = { .len = -1 };
	struct tcp_fastopen_cookie *foc = valid_foc;
	struct tcp_fastopen_context *ctx;
	int i, ret = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (!ctx)
		goto out;
	for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
		if (tcp_fastopen_cookie_match(foc, orig)) {
			ret = i + 1;
			goto out;
		}
		foc = &search_foc;
	}
out:
	rcu_read_unlock();
	return ret;
}

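/* Create the full child socket straight from the SYN, so its payload can
 * be queued before the SYN-ACK is even sent. The child lands directly on
 * the listener's accept queue rather than in the ehash, and the request
 * socket stays around (tp->fastopen_rsk) until the handshake completes.
 */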
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	rcu_assign_pointer(tp->fastopen_rsk, req);
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() sends the SYNACK and queues the child
	 * into the listener's accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

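	/* rskq_rst_head holds past TFO requests that drew a RST from the
	 * peer. When the queue is full, evict the oldest such request if
	 * its timer has already expired; otherwise count this as a
	 * listener overflow and reject the Fast Open attempt.
	 */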
	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

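/* A cookie may be waived three ways: the sysctl bit passed in @flag, the
 * TCP_FASTOPEN_NO_COOKIE socket option, or a per-route
 * RTAX_FASTOPEN_NO_COOKIE metric.
 */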
static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
 * may be updated and returned to the client in the SYN-ACK later, e.g. for
 * a Fast Open cookie request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;
	int ret = 0;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len == 0) {
		/* Client requests a cookie. */
		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
	} else if (foc->len > 0) {
		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
						    &valid_foc);
		if (!ret) {
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		} else {
			/* Cookie is valid. Create a (full) child socket to
			 * accept the data in SYN before returning a SYN-ACK to
			 * ack the data. If we fail to create the socket, fall
			 * back to acking the ISN only, but include the same
			 * cookie.
			 *
			 * Note: Data-less SYN with valid cookie is allowed to
			 * send data in SYN_RECV state.
			 */
fastopen:
			child = tcp_fastopen_create_child(sk, skb, req);
			if (child) {
				if (ret == 2) {
					valid_foc.exp = foc->exp;
					*foc = valid_foc;
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
				} else {
					foc->len = -1;
				}
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENPASSIVE);
				return child;
			}
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		}
	}
	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}

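/* Client side: decide whether data may be sent along with the SYN.
 * Returns true if so (either a cached cookie is available or cookies are
 * not required); *mss and *cookie are filled from the TCP metrics cache.
 */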
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	if (cookie->len > 0)
		return true;
	tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
	return false;
}

/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);


/*
 * The following code block deals with middlebox issues with TFO:
 * middlebox firewalls can cause the server's data to be blackholed even
 * after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 * 1. client side TFO socket receives out-of-order FIN
 * 2. client side TFO socket receives out-of-order RST
 * 3. client side TFO socket has timed out three times consecutively during
 *    or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)
		return;

	/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
	WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

	/* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
	 * We want net->ipv4.tfo_active_disable_stamp to be updated first.
	 */
	smp_mb__before_atomic();
	atomic_inc(&net->ipv4.tfo_active_disable_times);

	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
	unsigned long timeout;
	int tfo_da_times;
	int multiplier;

	if (!tfo_bh_timeout)
		return false;

	tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	if (!tfo_da_times)
		return false;

	/* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
	smp_rmb();

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);

	/* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
	timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
		  multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}


/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}

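/* Called on retransmission timeouts: if the connection used Fast Open and
 * keeps timing out, assume a middlebox is blackholing it and pause active
 * TFO globally (see the block comment above).
 */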
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connection during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}