Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *	TCP over IPv6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *	Linux INET6 implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *	Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *	Pedro Roque		<roque@di.fc.ul.pt>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *	Based on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *	linux/net/ipv4/tcp.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *	linux/net/ipv4/tcp_input.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *	linux/net/ipv4/tcp_output.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *	Fixes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  *					a single port at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/bottom_half.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/socket.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/sockios.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/jiffies.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/in.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/in6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/jhash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/ipsec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <linux/times.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <linux/ipv6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include <linux/icmpv6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include <linux/random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <linux/indirect_call_wrapper.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include <net/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include <net/ndisc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <net/inet6_hashtables.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <net/inet6_connection_sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <net/ipv6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <net/transp_v6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <net/addrconf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <net/ip6_route.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <net/ip6_checksum.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <net/inet_ecn.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #include <net/protocol.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #include <net/xfrm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #include <net/snmp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #include <net/dsfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #include <net/timewait_sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #include <net/inet_common.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #include <net/secure_seq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #include <net/busy_poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) #include <linux/proc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #include <crypto/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #include <trace/events/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 				      struct request_sock *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) static const struct inet_connection_sock_af_ops ipv6_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) const struct inet_connection_sock_af_ops ipv6_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) #else
/* Stub used when CONFIG_TCP_MD5SIG is disabled (we are in the #else
 * branch above): no MD5 keys can exist, so every lookup misses.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr,
						   int l3index)
{
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) /* Helper returning the inet6 address from a given tcp socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92)  * It can be used in TCP stack instead of inet6_sk(sk).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93)  * This avoids a dereference and allow compiler optimizations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94)  * It is a specialized version of inet6_sk_generic().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	struct dst_entry *dst = skb_dst(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	if (dst && dst_hold_safe(dst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 		const struct rt6_info *rt = (const struct rt6_info *)dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 		sk->sk_rx_dst = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 		tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) static u32 tcp_v6_init_seq(const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 				ipv6_hdr(skb)->saddr.s6_addr32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 				tcp_hdr(skb)->dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 				tcp_hdr(skb)->source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 				   ipv6_hdr(skb)->saddr.s6_addr32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 			      int addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	/* This check is replicated from tcp_v6_connect() and intended to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	 * prevent BPF program called below from accessing bytes that are out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	 * of the bound specified by user in addr_len.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	if (addr_len < SIN6_LEN_RFC2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	sock_owned_by_me(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
/* Connect a TCP socket to an IPv6 (or v4-mapped IPv4) destination.
 * Returns 0 on success or a negative errno; on failure the socket is
 * left unconnected (dport cleared, route caps reset, and state back
 * to TCP_CLOSE if it had reached TCP_SYN_SENT).
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	/* Caller-supplied flowinfo (sndflow set): take the flow label
	 * from the sockaddr and, if non-zero, validate it against the
	 * socket's flow label table.
	 */
	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Connecting to a different peer than last time: stale
	 * timestamp state and the old write_seq must not carry over
	 * into the new connection.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		WRITE_ONCE(tp->write_seq, 0);
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket to v4-mapped operations before
		 * handing off to tcp_v4_connect().
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(sk))
			mptcpv6_handle_mapped(sk, true);
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* Roll the socket back to native IPv6 operation. */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			if (sk_is_mptcp(sk))
				mptcpv6_handle_mapped(sk, false);
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	/* Native IPv6 path: respect an already-bound source address. */
	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	/* Socket lock is held here, which makes the RCU deref safe. */
	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	/* No bound source address: adopt the one route lookup chose. */
	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	/* Account for IPv6 extension headers in the MSS calculation. */
	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		/* Pick a secure ISN unless one was already set (e.g.
		 * by TCP repair), and initialize the timestamp offset.
		 */
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcpv6_seq(np->saddr.s6_addr32,
						    sk->sk_v6_daddr.s6_addr32,
						    inet->inet_sport,
						    inet->inet_dport));
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	/* TCP Fast Open may defer the actual SYN until sendmsg(). */
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) static void tcp_v6_mtu_reduced(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	u32 mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	/* Drop requests trying to increase our current mss.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 	 * Check done in __ip6_rt_update_pmtu() is too late.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	dst = inet6_csk_update_pmtu(sk, mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	if (!dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 		tcp_sync_mss(sk, dst_mtu(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 		tcp_simple_retransmit(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 		u8 type, u8 code, int offset, __be32 info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	struct net *net = dev_net(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	struct request_sock *fastopen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	struct ipv6_pinfo *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	struct tcp_sock *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	__u32 seq, snd_una;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	bool fatal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 					&hdr->daddr, th->dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 					&hdr->saddr, ntohs(th->source),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 					skb->dev->ifindex, inet6_sdif(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	if (!sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 				  ICMP6_MIB_INERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	if (sk->sk_state == TCP_TIME_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 		inet_twsk_put(inet_twsk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	seq = ntohl(th->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	fatal = icmpv6_err_convert(type, code, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 		tcp_req_err(sk, seq, fatal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	if (sk->sk_state == TCP_CLOSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	fastopen = rcu_dereference(tp->fastopen_rsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	if (sk->sk_state != TCP_LISTEN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	    !between(seq, snd_una, tp->snd_nxt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	np = tcp_inet6_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	if (type == NDISC_REDIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		if (!sock_owned_by_user(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 			if (dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 				dst->ops->redirect(dst, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	if (type == ICMPV6_PKT_TOOBIG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		u32 mtu = ntohl(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		/* We are not interested in TCP_LISTEN and open_requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		 * (SYN-ACKs send out by Linux are always <576bytes so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		 * they should go through unfragmented).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		if (sk->sk_state == TCP_LISTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		if (!ip6_sk_accept_pmtu(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		if (mtu < IPV6_MIN_MTU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		WRITE_ONCE(tp->mtu_info, mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		if (!sock_owned_by_user(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 			tcp_v6_mtu_reduced(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 					   &sk->sk_tsq_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 			sock_hold(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	/* Might be for an request_sock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	switch (sk->sk_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	case TCP_SYN_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	case TCP_SYN_RECV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		/* Only in fast or simultaneous open. If a fast open socket is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		 * already accepted it is treated as a connected one below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		if (fastopen && !fastopen->sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		if (!sock_owned_by_user(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 			sk->sk_err = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 			tcp_done(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 			sk->sk_err_soft = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	case TCP_LISTEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		/* check if this ICMP message allows revert of backoff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		 * (see RFC 6069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		    code == ICMPV6_NOROUTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 			tcp_ld_RTO_revert(sk, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	if (!sock_owned_by_user(sk) && np->recverr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		sk->sk_err = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		sk->sk_error_report(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		sk->sk_err_soft = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 			      struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 			      struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 			      struct tcp_fastopen_cookie *foc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 			      enum tcp_synack_type synack_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 			      struct sk_buff *syn_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	struct inet_request_sock *ireq = inet_rsk(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	struct ipv6_txoptions *opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	struct flowi6 *fl6 = &fl->u.ip6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	u8 tclass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	/* First, grab a route. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 					       IPPROTO_TCP)) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 				    &ireq->ir_v6_rmt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		fl6->daddr = ireq->ir_v6_rmt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		if (np->repflow && ireq->pktopts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 				(np->tclass & INET_ECN_MASK) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 				np->tclass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		if (!INET_ECN_is_capable(tclass) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 		    tcp_bpf_ca_needs_ecn((struct sock *)req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 			tclass |= INET_ECN_ECT_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		opt = ireq->ipv6_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		if (!opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 			opt = rcu_dereference(np->opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 			       tclass, sk->sk_priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		err = net_xmit_eval(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) static void tcp_v6_reqsk_destructor(struct request_sock *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	kfree(inet_rsk(req)->ipv6_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	kfree_skb(inet_rsk(req)->pktopts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 						   const struct in6_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 						   int l3index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	return tcp_md5_do_lookup(sk, l3index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 				 (union tcp_md5_addr *)addr, AF_INET6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 						const struct sock *addr_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	int l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 						 addr_sk->sk_bound_dev_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 				    l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 				 sockptr_t optval, int optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	struct tcp_md5sig cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	int l3index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	u8 prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	if (optlen < sizeof(cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	if (sin6->sin6_family != AF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	if (optname == TCP_MD5SIG_EXT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		prefixlen = cmd.tcpm_prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 					prefixlen > 32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	if (optname == TCP_MD5SIG_EXT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		if (dev && netif_is_l3_master(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 			l3index = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		/* ok to reference set/not set outside of rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		 * right now device MUST be an L3 master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		if (!dev || !l3index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	if (!cmd.tcpm_keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 					      AF_INET, prefixlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 					      l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 				      AF_INET6, prefixlen, l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 				      AF_INET, prefixlen, l3index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 				      cmd.tcpm_key, cmd.tcpm_keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 				      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			      AF_INET6, prefixlen, l3index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 				   const struct in6_addr *daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 				   const struct in6_addr *saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 				   const struct tcphdr *th, int nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	struct tcp6_pseudohdr *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	struct scatterlist sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	struct tcphdr *_th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	bp = hp->scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	/* 1. TCP pseudo-header (RFC2460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	bp->saddr = *saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	bp->daddr = *daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	bp->len = cpu_to_be32(nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	_th = (struct tcphdr *)(bp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	memcpy(_th, th, sizeof(*th));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	_th->check = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 				sizeof(*bp) + sizeof(*th));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	return crypto_ahash_update(hp->md5_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 			       const struct in6_addr *daddr, struct in6_addr *saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 			       const struct tcphdr *th)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	struct tcp_md5sig_pool *hp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	struct ahash_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	hp = tcp_get_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	if (!hp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		goto clear_hash_noput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	req = hp->md5_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	if (crypto_ahash_init(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	if (tcp_md5_hash_key(hp, key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	if (crypto_ahash_final(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	tcp_put_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) clear_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	tcp_put_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) clear_hash_noput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	memset(md5_hash, 0, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) static int tcp_v6_md5_hash_skb(char *md5_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			       const struct tcp_md5sig_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 			       const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			       const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	const struct in6_addr *saddr, *daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	struct tcp_md5sig_pool *hp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	struct ahash_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	const struct tcphdr *th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (sk) { /* valid for establish/request sockets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		saddr = &sk->sk_v6_rcv_saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		daddr = &sk->sk_v6_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		saddr = &ip6h->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		daddr = &ip6h->daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	hp = tcp_get_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	if (!hp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		goto clear_hash_noput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	req = hp->md5_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	if (crypto_ahash_init(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	if (tcp_md5_hash_key(hp, key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	if (crypto_ahash_final(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	tcp_put_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) clear_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	tcp_put_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) clear_hash_noput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	memset(md5_hash, 0, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 				    const struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 				    int dif, int sdif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	const __u8 *hash_location = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	struct tcp_md5sig_key *hash_expected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	const struct tcphdr *th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	int genhash, l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	u8 newhash[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	/* sdif set, means packet ingressed via a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	 * in an L3 domain and dif is set to the l3mdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	l3index = sdif ? dif : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr, l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	hash_location = tcp_parse_md5sig_option(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	/* We've parsed the options - do we have a hash? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	if (!hash_expected && !hash_location)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	if (hash_expected && !hash_location) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (!hash_expected && hash_location) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	/* check the signature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	genhash = tcp_v6_md5_hash_skb(newhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 				      hash_expected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 				      NULL, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 				     genhash ? "failed" : "mismatch",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 				     &ip6h->saddr, ntohs(th->source),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 				     &ip6h->daddr, ntohs(th->dest), l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) static void tcp_v6_init_req(struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			    const struct sock *sk_listener,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			    struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	struct inet_request_sock *ireq = inet_rsk(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	/* So that link locals have meaning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		ireq->ir_iif = tcp_v6_iif(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	     np->rxopt.bits.rxinfo ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	     np->rxopt.bits.rxohlim || np->repflow)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		refcount_inc(&skb->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		ireq->pktopts = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 					  struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 					  const struct request_sock *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	.family		=	AF_INET6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	.obj_size	=	sizeof(struct tcp6_request_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	.rtx_syn_ack	=	tcp_rtx_synack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	.send_ack	=	tcp_v6_reqsk_send_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	.destructor	=	tcp_v6_reqsk_destructor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	.send_reset	=	tcp_v6_send_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	.syn_ack_timeout =	tcp_syn_ack_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 				sizeof(struct ipv6hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	.req_md5_lookup	=	tcp_v6_md5_lookup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	.init_req	=	tcp_v6_init_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) #ifdef CONFIG_SYN_COOKIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	.cookie_init_seq =	cookie_v6_init_sequence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	.route_req	=	tcp_v6_route_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	.init_seq	=	tcp_v6_init_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	.init_ts_off	=	tcp_v6_init_ts_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	.send_synack	=	tcp_v6_send_synack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 				 int oif, struct tcp_md5sig_key *key, int rst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 				 u8 tclass, __be32 label, u32 priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	const struct tcphdr *th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct tcphdr *t1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct sk_buff *buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	struct flowi6 fl6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	struct sock *ctl_sk = net->ipv6.tcp_sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	unsigned int tot_len = sizeof(struct tcphdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	__be32 *topt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	__u32 mark = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (tsecr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			 GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	if (!buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	t1 = skb_push(buff, tot_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	skb_reset_transport_header(buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	/* Swap the send and the receive. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	memset(t1, 0, sizeof(*t1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	t1->dest = th->source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	t1->source = th->dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	t1->doff = tot_len / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	t1->seq = htonl(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	t1->ack_seq = htonl(ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	t1->ack = !rst || !th->ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	t1->rst = rst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	t1->window = htons(win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	topt = (__be32 *)(t1 + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (tsecr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		*topt++ = htonl(tsval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		*topt++ = htonl(tsecr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 				    &ipv6_hdr(skb)->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 				    &ipv6_hdr(skb)->daddr, t1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	memset(&fl6, 0, sizeof(fl6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	fl6.daddr = ipv6_hdr(skb)->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	fl6.saddr = ipv6_hdr(skb)->daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	fl6.flowlabel = label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	buff->ip_summed = CHECKSUM_PARTIAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	buff->csum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	fl6.flowi6_proto = IPPROTO_TCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (rt6_need_strict(&fl6.daddr) && !oif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		fl6.flowi6_oif = tcp_v6_iif(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			oif = skb->skb_iif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		fl6.flowi6_oif = oif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		if (sk->sk_state == TCP_TIME_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			mark = inet_twsk(sk)->tw_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			/* autoflowlabel relies on buff->hash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 				     PKT_HASH_TYPE_L4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			mark = sk->sk_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		buff->tstamp = tcp_transmit_time(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	fl6.fl6_dport = t1->dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	fl6.fl6_sport = t1->source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	/* Pass a socket to ip6_dst_lookup either it is for RST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	 * Underlying function will use this to retrieve the network
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	 * namespace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	if (!IS_ERR(dst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		skb_dst_set(buff, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			 tclass & ~INET_ECN_MASK, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		if (rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	kfree_skb(buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	const struct tcphdr *th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	u32 seq = 0, ack_seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	struct tcp_md5sig_key *key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	const __u8 *hash_location = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	unsigned char newhash[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	int genhash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	struct sock *sk1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	__be32 label = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	u32 priority = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	struct net *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	int oif = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	if (th->rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	/* If sk not NULL, it means we did a successful lookup and incoming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * route had to be correct. prequeue might have dropped our dst.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	if (!sk && !ipv6_unicast_destination(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	hash_location = tcp_parse_md5sig_option(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	if (sk && sk_fullsock(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		int l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		/* sdif set, means packet ingressed via a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		 * in an L3 domain and inet_iif is set to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	} else if (hash_location) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		int dif = tcp_v6_iif_l3_slave(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		int sdif = tcp_v6_sdif(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		int l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		 * active side is lost. Try to find listening socket through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		 * source port, and then find md5 key through listening socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		 * we are not loose security here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		 * Incoming packet is checked with md5 hash with finding key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		 * no RST generated if md5 hash doesn't match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		sk1 = inet6_lookup_listener(net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 					   &tcp_hashinfo, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 					   &ipv6h->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 					   th->source, &ipv6h->daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 					   ntohs(th->source), dif, sdif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		if (!sk1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		/* sdif set, means packet ingressed via a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		 * in an L3 domain and dif is set to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		l3index = tcp_v6_sdif(skb) ? dif : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		if (!key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	if (th->ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		seq = ntohl(th->ack_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			  (th->doff << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	if (sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		oif = sk->sk_bound_dev_if;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		if (sk_fullsock(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			trace_tcp_send_reset(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			if (np->repflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 				label = ip6_flowlabel(ipv6h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			priority = sk->sk_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		if (sk->sk_state == TCP_TIME_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			priority = inet_twsk(sk)->tw_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			label = ip6_flowlabel(ipv6h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			     ipv6_get_dsfield(ipv6h), label, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			    struct tcp_md5sig_key *key, u8 tclass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			    __be32 label, u32 priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			     tclass, label, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	struct inet_timewait_sock *tw = inet_twsk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	inet_twsk_put(tw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 				  struct request_sock *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	int l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	/* RFC 7323 2.3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	 * The window field (SEG.WND) of every outgoing segment, with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	 * exception of <SYN> segments, MUST be right-shifted by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	 * Rcv.Wind.Shift bits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			tcp_rsk(req)->rcv_nxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			req->ts_recent, sk->sk_bound_dev_if,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) #ifdef CONFIG_SYN_COOKIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	const struct tcphdr *th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	if (!th->syn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		sk = cookie_v6_check(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			 struct tcphdr *th, u32 *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	u16 mss = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) #ifdef CONFIG_SYN_COOKIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 				    &tcp_request_sock_ipv6_ops, sk, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	if (mss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		tcp_synq_overflow(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	return mss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	if (skb->protocol == htons(ETH_P_IP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		return tcp_v4_conn_request(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	if (!ipv6_unicast_destination(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	return tcp_conn_request(&tcp6_request_sock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 				&tcp_request_sock_ipv6_ops, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	tcp_listendrop(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	return 0; /* don't send reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static void tcp_v6_restore_cb(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	/* We need to move header back to the beginning if xfrm6_policy_check()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	 * and tcp_v6_fill_cb() are going to be called again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		sizeof(struct inet6_skb_parm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 					 struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 					 struct dst_entry *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 					 struct request_sock *req_unhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 					 bool *own_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	struct inet_request_sock *ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct ipv6_pinfo *newnp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	struct ipv6_txoptions *opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	struct inet_sock *newinet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	bool found_dup_sk = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	struct tcp_sock *newtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	struct sock *newsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	struct tcp_md5sig_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	int l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	struct flowi6 fl6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	if (skb->protocol == htons(ETH_P_IP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		 *	v6 mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 					     req_unhash, own_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		if (!newsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		newinet = inet_sk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		newnp = tcp_inet6_sk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		newtp = tcp_sk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		newnp->saddr = newsk->sk_v6_rcv_saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		if (sk_is_mptcp(newsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			mptcpv6_handle_mapped(newsk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		newnp->ipv6_mc_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		newnp->ipv6_ac_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		newnp->ipv6_fl_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		newnp->pktoptions  = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		newnp->opt	   = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		newnp->mcast_oif   = inet_iif(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		newnp->mcast_hops  = ip_hdr(skb)->ttl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		newnp->rcv_flowinfo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		if (np->repflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			newnp->flow_label = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		 * here, tcp_create_openreq_child now does this for us, see the comment in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		 * that function for the gory details. -acme
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		/* It is tricky place. Until this moment IPv4 tcp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		   worked with IPv6 icsk.icsk_af_ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		   Sync it now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		return newsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	ireq = inet_rsk(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	if (sk_acceptq_is_full(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		goto out_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	if (!dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		if (!dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	newsk = tcp_create_openreq_child(sk, req, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (!newsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		goto out_nonewsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	 * count here, tcp_create_openreq_child now does this for us, see the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	 * comment in that function for the gory details. -acme
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	newsk->sk_gso_type = SKB_GSO_TCPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	ip6_dst_store(newsk, dst, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	inet6_sk_rx_dst_set(newsk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	newtp = tcp_sk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	newinet = inet_sk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	newnp = tcp_inet6_sk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	newnp->saddr = ireq->ir_v6_loc_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	newsk->sk_bound_dev_if = ireq->ir_iif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	/* Now IPv6 options...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	   First: no IPv4 options.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	newinet->inet_opt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	newnp->ipv6_mc_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	newnp->ipv6_ac_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	newnp->ipv6_fl_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	/* Clone RX bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	newnp->rxopt.all = np->rxopt.all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	newnp->pktoptions = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	newnp->opt	  = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	newnp->mcast_oif  = tcp_v6_iif(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	if (np->repflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	/* Set ToS of the new socket based upon the value of incoming SYN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	 * ECT bits are set later in tcp_init_transfer().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	/* Clone native IPv6 options from listening socket (if any)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	   Yes, keeping reference count would be much more clever,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	   but we make one more one thing there: reattach optmem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	   to newsk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	opt = ireq->ipv6_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if (!opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		opt = rcu_dereference(np->opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	if (opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		opt = ipv6_dup_options(newsk, opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		RCU_INIT_POINTER(newnp->opt, opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	if (opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 						    opt->opt_flen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	tcp_ca_openreq_child(newsk, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	tcp_sync_mss(newsk, dst_mtu(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	tcp_initialize_rcv_mss(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	/* Copy over the MD5 key from the original socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	if (key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		/* We're using one, so create a matching key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		 * on the newsk structure. If we fail to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		 * memory, then we end up not copying the key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		 * across. Shucks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			       AF_INET6, 128, l3index, key->key, key->keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 			       sk_gfp_mask(sk, GFP_ATOMIC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	if (__inet_inherit_port(sk, newsk) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		inet_csk_prepare_forced_close(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		tcp_done(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 				       &found_dup_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	if (*own_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		tcp_move_syn(newtp, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		/* Clone pktoptions received with SYN, if we own the req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		if (ireq->pktopts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			newnp->pktoptions = skb_clone(ireq->pktopts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 						      sk_gfp_mask(sk, GFP_ATOMIC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 			consume_skb(ireq->pktopts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 			ireq->pktopts = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 			if (newnp->pktoptions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 				tcp_v6_restore_cb(newnp->pktoptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 				skb_set_owner_r(newnp->pktoptions, newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		if (!req_unhash && found_dup_sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 			/* This code path should only be executed in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			 * syncookie case only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 			bh_unlock_sock(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			sock_put(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			newsk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	return newsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) out_overflow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) out_nonewsk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	tcp_listendrop(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /* The socket must have it's spinlock held when we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)  * here, unless it is a TCP_LISTEN socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  * We have a potential double-lock case here, so even when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)  * doing backlog processing we use the BH locking scheme.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)  * This is because we cannot sleep with the original spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)  * held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct sk_buff *opt_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	struct tcp_sock *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	/* Imagine: socket is IPv6. IPv4 packet arrives,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	   goes to IPv4 receive handler and backlogged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	   From backlog it always goes here. Kerboom...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	   Fortunately, tcp_rcv_established and rcv_established
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	   handle them correctly, but it is not case with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	if (skb->protocol == htons(ETH_P_IP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		return tcp_v4_do_rcv(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	 *	socket locking is here for SMP purposes as backlog rcv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	 *	is currently called with bh processing disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	/* Do Stevens' IPV6_PKTOPTIONS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	   Yes, guys, it is the only place in our code, where we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	   may make it not affecting IPv4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	   The rest of code is protocol independent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	   and I do not like idea to uglify IPv4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	   Actually, all the idea behind IPV6_PKTOPTIONS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	   looks not very well thought. For now we latch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	   options, received in the last packet, enqueued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	   by tcp. Feel free to propose better solution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 					       --ANK (980728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	if (np->rxopt.all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		struct dst_entry *dst = sk->sk_rx_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		sock_rps_save_rxhash(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		sk_mark_napi_id(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		if (dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 				dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 				sk->sk_rx_dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		tcp_rcv_established(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		if (opt_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			goto ipv6_pktoptions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	if (tcp_checksum_complete(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		goto csum_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	if (sk->sk_state == TCP_LISTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		if (!nsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 			goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		if (nsk != sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			if (tcp_child_process(sk, nsk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 				goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			if (opt_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 				__kfree_skb(opt_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		sock_rps_save_rxhash(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	if (tcp_rcv_state_process(sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	if (opt_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		goto ipv6_pktoptions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	tcp_v6_send_reset(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) discard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	if (opt_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		__kfree_skb(opt_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) csum_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) ipv6_pktoptions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	/* Do you ask, what is it?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	   1. skb was enqueued by tcp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	   2. skb is added to tail of read queue, rather than out of order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	   3. socket is not in passive state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	   4. Finally, it really contains options, which user wants to receive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			np->mcast_oif = tcp_v6_iif(opt_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		if (np->repflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 			skb_set_owner_r(opt_skb, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 			tcp_v6_restore_cb(opt_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 			opt_skb = xchg(&np->pktoptions, opt_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 			__kfree_skb(opt_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			opt_skb = xchg(&np->pktoptions, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	kfree_skb(opt_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 			   const struct tcphdr *th)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	/* This is tricky: we move IP6CB at its correct location into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	 * _decode_session6() uses IP6CB().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	 * barrier() makes sure compiler won't play aliasing games.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		sizeof(struct inet6_skb_parm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 				    skb->len - th->doff*4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	TCP_SKB_CB(skb)->sacked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	TCP_SKB_CB(skb)->has_rxtstamp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	struct sk_buff *skb_to_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	int sdif = inet6_sdif(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	int dif = inet6_iif(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	const struct tcphdr *th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	const struct ipv6hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	bool refcounted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	struct net *net = dev_net(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	if (skb->pkt_type != PACKET_HOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	 *	Count it even if it's bad.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	th = (const struct tcphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		goto bad_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	if (!pskb_may_pull(skb, th->doff*4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		goto csum_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	th = (const struct tcphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	hdr = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) lookup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 				th->source, th->dest, inet6_iif(skb), sdif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 				&refcounted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		goto no_tcp_socket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) process:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	if (sk->sk_state == TCP_TIME_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		goto do_time_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		struct request_sock *req = inet_reqsk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		bool req_stolen = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		struct sock *nsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		sk = req->rsk_listener;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 			sk_drops_add(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 			reqsk_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		if (tcp_checksum_complete(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 			reqsk_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 			goto csum_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 			inet_csk_reqsk_queue_drop_and_put(sk, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 			goto lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		sock_hold(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		refcounted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		nsk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		if (!tcp_filter(sk, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			th = (const struct tcphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 			hdr = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 			tcp_v6_fill_cb(skb, hdr, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		if (!nsk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 			reqsk_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 			if (req_stolen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 				/* Another cpu got exclusive access to req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 				 * and created a full blown socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 				 * Try to feed this packet to this socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 				 * instead of discarding it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 				tcp_v6_restore_cb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 				sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 				goto lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		if (nsk == sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 			reqsk_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			tcp_v6_restore_cb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		} else if (tcp_child_process(sk, nsk, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			tcp_v6_send_reset(nsk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 			goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	if (tcp_filter(sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	th = (const struct tcphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	hdr = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	tcp_v6_fill_cb(skb, hdr, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	skb->dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	if (sk->sk_state == TCP_LISTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		ret = tcp_v6_do_rcv(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		goto put_and_return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	sk_incoming_cpu_update(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	bh_lock_sock_nested(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	tcp_segs_in(tcp_sk(sk), skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	if (!sock_owned_by_user(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		skb_to_free = sk->sk_rx_skb_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		sk->sk_rx_skb_cache = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		ret = tcp_v6_do_rcv(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		if (tcp_add_backlog(sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 			goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		skb_to_free = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	if (skb_to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		__kfree_skb(skb_to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) put_and_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	if (refcounted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	return ret ? -1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) no_tcp_socket:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	tcp_v6_fill_cb(skb, hdr, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	if (tcp_checksum_complete(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) csum_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) bad_packet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		tcp_v6_send_reset(NULL, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) discard_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) discard_and_relse:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	sk_drops_add(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	if (refcounted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) do_time_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		inet_twsk_put(inet_twsk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	tcp_v6_fill_cb(skb, hdr, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	if (tcp_checksum_complete(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		inet_twsk_put(inet_twsk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		goto csum_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	case TCP_TW_SYN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		struct sock *sk2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 					    skb, __tcp_hdrlen(th),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 					    &ipv6_hdr(skb)->saddr, th->source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 					    &ipv6_hdr(skb)->daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 					    ntohs(th->dest),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 					    tcp_v6_iif_l3_slave(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 					    sdif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		if (sk2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			struct inet_timewait_sock *tw = inet_twsk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 			inet_twsk_deschedule_put(tw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 			sk = sk2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 			tcp_v6_restore_cb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			refcounted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 			goto process;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		/* to ACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	case TCP_TW_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		tcp_v6_timewait_ack(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	case TCP_TW_RST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		tcp_v6_send_reset(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		inet_twsk_deschedule_put(inet_twsk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	case TCP_TW_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	const struct ipv6hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	const struct tcphdr *th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	if (skb->pkt_type != PACKET_HOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	hdr = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	if (th->doff < sizeof(struct tcphdr) / 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 					&hdr->saddr, th->source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 					&hdr->daddr, ntohs(th->dest),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 					inet6_iif(skb), inet6_sdif(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	if (sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		skb->sk = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		skb->destructor = sock_edemux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		if (sk_fullsock(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			if (dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 				dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 			if (dst &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 				skb_dst_set_noref(skb, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static struct timewait_sock_ops tcp6_timewait_sock_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	.twsk_unique	= tcp_twsk_unique,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	.twsk_destructor = tcp_twsk_destructor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	struct ipv6_pinfo *np = inet6_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	__tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) const struct inet_connection_sock_af_ops ipv6_specific = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	.queue_xmit	   = inet6_csk_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	.send_check	   = tcp_v6_send_check,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	.rebuild_header	   = inet6_sk_rebuild_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	.conn_request	   = tcp_v6_conn_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	.net_header_len	   = sizeof(struct ipv6hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	.net_frag_header_len = sizeof(struct frag_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	.setsockopt	   = ipv6_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	.getsockopt	   = ipv6_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	.mtu_reduced	   = tcp_v6_mtu_reduced,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	.md5_lookup	=	tcp_v6_md5_lookup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	.md5_parse	=	tcp_v6_parse_md5_keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)  *	TCP over IPv4 via INET6 API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) static const struct inet_connection_sock_af_ops ipv6_mapped = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	.queue_xmit	   = ip_queue_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	.send_check	   = tcp_v4_send_check,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	.rebuild_header	   = inet_sk_rebuild_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	.conn_request	   = tcp_v6_conn_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	.net_header_len	   = sizeof(struct iphdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	.setsockopt	   = ipv6_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	.getsockopt	   = ipv6_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	.mtu_reduced	   = tcp_v4_mtu_reduced,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	.md5_lookup	=	tcp_v4_md5_lookup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	.md5_parse	=	tcp_v6_parse_md5_keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /* NOTE: A lot of things set to zero explicitly by call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)  *       sk_alloc() so need not be done here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) static int tcp_v6_init_sock(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	struct inet_connection_sock *icsk = inet_csk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	tcp_init_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	icsk->icsk_af_ops = &ipv6_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) static void tcp_v6_destroy_sock(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	tcp_v4_destroy_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	inet6_destroy_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) /* Proc filesystem TCPv6 sock list dumping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) static void get_openreq6(struct seq_file *seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 			 const struct request_sock *req, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	long ttd = req->rsk_timer.expires - jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	if (ttd < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		ttd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	seq_printf(seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		   i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		   src->s6_addr32[0], src->s6_addr32[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		   src->s6_addr32[2], src->s6_addr32[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		   inet_rsk(req)->ir_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		   dest->s6_addr32[0], dest->s6_addr32[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		   dest->s6_addr32[2], dest->s6_addr32[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		   ntohs(inet_rsk(req)->ir_rmt_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		   TCP_SYN_RECV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		   0, 0, /* could print option size, but that is af dependent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		   1,   /* timers active (only the expire timer) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		   jiffies_to_clock_t(ttd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		   req->num_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		   from_kuid_munged(seq_user_ns(seq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 				    sock_i_uid(req->rsk_listener)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		   0,  /* non standard timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		   0, /* open_requests have no inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		   0, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	const struct in6_addr *dest, *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	__u16 destp, srcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	int timer_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	unsigned long timer_expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	const struct inet_sock *inet = inet_sk(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	const struct tcp_sock *tp = tcp_sk(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	const struct inet_connection_sock *icsk = inet_csk(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	int rx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	dest  = &sp->sk_v6_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	src   = &sp->sk_v6_rcv_saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	destp = ntohs(inet->inet_dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	srcp  = ntohs(inet->inet_sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		timer_active	= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		timer_expires	= icsk->icsk_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		timer_active	= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		timer_expires	= icsk->icsk_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	} else if (timer_pending(&sp->sk_timer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		timer_active	= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		timer_expires	= sp->sk_timer.expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		timer_active	= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		timer_expires = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	state = inet_sk_state_load(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	if (state == TCP_LISTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		rx_queue = READ_ONCE(sp->sk_ack_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		/* Because we don't lock the socket,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		 * we might find a transient negative value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 				      READ_ONCE(tp->copied_seq), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	seq_printf(seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		   i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		   src->s6_addr32[0], src->s6_addr32[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		   dest->s6_addr32[0], dest->s6_addr32[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		   state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		   READ_ONCE(tp->write_seq) - tp->snd_una,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		   rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		   timer_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		   icsk->icsk_retransmits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		   icsk->icsk_probes_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		   sock_i_ino(sp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		   refcount_read(&sp->sk_refcnt), sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		   jiffies_to_clock_t(icsk->icsk_rto),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		   tp->snd_cwnd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		   state == TCP_LISTEN ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 			fastopenq->max_qlen :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		   );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) static void get_timewait6_sock(struct seq_file *seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 			       struct inet_timewait_sock *tw, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	long delta = tw->tw_timer.expires - jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	const struct in6_addr *dest, *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	__u16 destp, srcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	dest = &tw->tw_v6_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	src  = &tw->tw_v6_rcv_saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	destp = ntohs(tw->tw_dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	srcp  = ntohs(tw->tw_sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	seq_printf(seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		   i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		   src->s6_addr32[0], src->s6_addr32[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		   dest->s6_addr32[0], dest->s6_addr32[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		   tw->tw_substate, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		   refcount_read(&tw->tw_refcnt), tw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) static int tcp6_seq_show(struct seq_file *seq, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	struct tcp_iter_state *st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	struct sock *sk = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	if (v == SEQ_START_TOKEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		seq_puts(seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 			 "  sl  "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 			 "local_address                         "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 			 "remote_address                        "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 			 "st tx_queue rx_queue tr tm->when retrnsmt"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 			 "   uid  timeout inode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	if (sk->sk_state == TCP_TIME_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		get_timewait6_sock(seq, v, st->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		get_openreq6(seq, v, st->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		get_tcp6_sock(seq, v, st->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) static const struct seq_operations tcp6_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	.show		= tcp6_seq_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	.start		= tcp_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	.next		= tcp_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	.stop		= tcp_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) static struct tcp_seq_afinfo tcp6_seq_afinfo = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	.family		= AF_INET6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) int __net_init tcp6_proc_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 			sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) void tcp6_proc_exit(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	remove_proc_entry("tcp6", net->proc_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) struct proto tcpv6_prot = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	.name			= "TCPv6",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	.owner			= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	.close			= tcp_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	.pre_connect		= tcp_v6_pre_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	.connect		= tcp_v6_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	.disconnect		= tcp_disconnect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	.accept			= inet_csk_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	.ioctl			= tcp_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	.init			= tcp_v6_init_sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	.destroy		= tcp_v6_destroy_sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	.shutdown		= tcp_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	.setsockopt		= tcp_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	.getsockopt		= tcp_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	.keepalive		= tcp_set_keepalive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	.recvmsg		= tcp_recvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	.sendmsg		= tcp_sendmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	.sendpage		= tcp_sendpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	.backlog_rcv		= tcp_v6_do_rcv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	.release_cb		= tcp_release_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	.hash			= inet6_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	.unhash			= inet_unhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	.get_port		= inet_csk_get_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	.enter_memory_pressure	= tcp_enter_memory_pressure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	.leave_memory_pressure	= tcp_leave_memory_pressure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	.stream_memory_free	= tcp_stream_memory_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	.sockets_allocated	= &tcp_sockets_allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	.memory_allocated	= &tcp_memory_allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	.memory_pressure	= &tcp_memory_pressure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	.orphan_count		= &tcp_orphan_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	.sysctl_mem		= sysctl_tcp_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	.max_header		= MAX_TCP_HEADER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	.obj_size		= sizeof(struct tcp6_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	.twsk_prot		= &tcp6_timewait_sock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	.rsk_prot		= &tcp6_request_sock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	.h.hashinfo		= &tcp_hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	.no_autobind		= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	.diag_destroy		= tcp_abort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) EXPORT_SYMBOL_GPL(tcpv6_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) /* thinking of making this const? Don't.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)  * early_demux can change based on sysctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) static struct inet6_protocol tcpv6_protocol = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	.early_demux	=	tcp_v6_early_demux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	.early_demux_handler =  tcp_v6_early_demux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	.handler	=	tcp_v6_rcv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	.err_handler	=	tcp_v6_err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) static struct inet_protosw tcpv6_protosw = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	.type		=	SOCK_STREAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	.protocol	=	IPPROTO_TCP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	.prot		=	&tcpv6_prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	.ops		=	&inet6_stream_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	.flags		=	INET_PROTOSW_PERMANENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 				INET_PROTOSW_ICSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) static int __net_init tcpv6_net_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 				    SOCK_RAW, IPPROTO_TCP, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) static void __net_exit tcpv6_net_exit(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) static struct pernet_operations tcpv6_net_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	.init	    = tcpv6_net_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	.exit	    = tcpv6_net_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	.exit_batch = tcpv6_net_exit_batch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) int __init tcpv6_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	/* register inet6 protocol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	ret = inet6_register_protosw(&tcpv6_protosw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		goto out_tcpv6_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	ret = register_pernet_subsys(&tcpv6_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		goto out_tcpv6_protosw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	ret = mptcpv6_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		goto out_tcpv6_pernet_subsys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) out_tcpv6_pernet_subsys:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	unregister_pernet_subsys(&tcpv6_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) out_tcpv6_protosw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	inet6_unregister_protosw(&tcpv6_protosw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) out_tcpv6_protocol:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) void tcpv6_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	unregister_pernet_subsys(&tcpv6_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	inet6_unregister_protosw(&tcpv6_protosw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) }