Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *              Steve Whitehouse:       Default routines for sock_ops
 *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *              			protinfo be just a void pointer, as the
 *              			protocol specific parts were moved to
 *              			respective headers and ipv4/v6, etc now
 *              			use private slabcaches for its socks
 *              Pedro Hortas	:	New flags field for socket options
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
#include <linux/rbtree.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>
#include <linux/sockptr.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/l3mdev.h>
#include <linux/android_kabi.h>
#include <linux/android_vendor.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif

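/* Illustrative example, not part of the upstream header: protocol code
 * calls SOCK_DEBUG() with a printk-style format, and the messages appear
 * only when SOCK_DEBUGGING is defined here and user space has enabled
 * SO_DEBUG on the socket (which sets the SOCK_DBG flag tested above), e.g.
 *
 *	SOCK_DEBUG(sk, "rcvbuf full, dropping skb of len %u\n", skb->len);
 *
 * With SOCK_DEBUGGING undefined, the empty __printf(2, 3) stub still
 * type-checks the format string and arguments but produces no output.
 */
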
/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

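/* Illustrative example, not part of the upstream header: the two levels the
 * comment above describes. Process context takes the mutex-like level,
 *
 *	lock_sock(sk);
 *	... modify socket state, may sleep ...
 *	release_sock(sk);
 *
 * while softirq processing only takes the spinlock level and, if a user
 * context currently owns the socket, defers its work to the backlog:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		... process directly ...
 *	else
 *		... queue onto sk->sk_backlog ...
 *	bh_unlock_sock(sk);
 *
 * lock_sock(), release_sock(), bh_lock_sock() and sock_owned_by_user() are
 * all provided further down in this header.
 */
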
struct sock;
struct proto;
struct net;

typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_daddr: Foreign IPv4 addr
 *	@skc_rcv_saddr: Bound local IPv4 addr
 *	@skc_addrpair: 8-byte-aligned __u64 union of @skc_daddr & @skc_rcv_saddr
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_dport: placeholder for inet_dport/tw_dport
 *	@skc_num: placeholder for inet_num/tw_num
 *	@skc_portpair: __u32 union of @skc_dport & @skc_num
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_reuseport: %SO_REUSEPORT setting
 *	@skc_ipv6only: socket is IPV6 only
 *	@skc_net_refcnt: socket is using net ref counting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *	@skc_v6_daddr: IPV6 destination address
 *	@skc_v6_rcv_saddr: IPV6 source address
 *	@skc_cookie: socket's cookie value
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_rx_queue_mapping: rx queue number for this connection
 *	@skc_flags: place holder for sk_flags
 *		%SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *		%SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 *	@skc_listener: connection request listener socket (aka rsk_listener)
 *		[union with @skc_flags]
 *	@skc_tw_dr: (aka tw_dr) ptr to &struct inet_timewait_death_row
 *		[union with @skc_flags]
 *	@skc_incoming_cpu: record/match cpu processing incoming packets
 *	@skc_rcv_wnd: (aka rsk_rcv_wnd) TCP receive window size (possibly scaled)
 *		[union with @skc_incoming_cpu]
 *	@skc_tw_rcv_nxt: (aka tw_rcv_nxt) TCP window next expected seq number
 *		[union with @skc_incoming_cpu]
 *	@skc_refcnt: reference count
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
	 * address on 64bit arches : cf INET_MATCH()
	 */
	union {
		__addrpair	skc_addrpair;
		struct {
			__be32	skc_daddr;
			__be32	skc_rcv_saddr;
		};
	};
	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	/* skc_dport && skc_num must be grouped as well */
	union {
		__portpair	skc_portpair;
		struct {
			__be16	skc_dport;
			__u16	skc_num;
		};
	};

	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse:4;
	unsigned char		skc_reuseport:1;
	unsigned char		skc_ipv6only:1;
	unsigned char		skc_net_refcnt:1;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
	possible_net_t		skc_net;

#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		skc_v6_daddr;
	struct in6_addr		skc_v6_rcv_saddr;
#endif

	atomic64_t		skc_cookie;

	/* following fields are padding to force
	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
	 * assuming IPV6 is enabled. We use this padding differently
	 * for different kind of 'sockets'
	 */
	union {
		unsigned long	skc_flags;
		struct sock	*skc_listener; /* request_sock */
		struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
	};
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	unsigned short		skc_tx_queue_mapping;
#ifdef CONFIG_XPS
	unsigned short		skc_rx_queue_mapping;
#endif
	union {
		int		skc_incoming_cpu;
		u32		skc_rcv_wnd;
		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock  */
	};

	refcount_t		skc_refcnt;
	/* private: */
	int                     skc_dontcopy_end[0];
	union {
		u32		skc_rxhash;
		u32		skc_window_clamp;
		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
	};
	/* public: */
};

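/* Illustrative sketch, not part of the upstream header: because skc_daddr
 * and skc_rcv_saddr share one 8-byte-aligned __addrpair (see the comment at
 * the top of struct sock_common), on 64-bit kernels lookup code such as
 * INET_MATCH() can compare both addresses of a candidate socket against a
 * precomputed cookie with a single 64-bit comparison. The helper name below
 * is made up for the example.
 */
static inline bool sock_common_addrpair_match_example(const struct sock_common *skc,
						      __addrpair cookie)
{
	return skc->skc_addrpair == cookie;
}
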
struct bpf_local_storage;

/**
  *	struct sock - network layer representation of sockets
  *	@__sk_common: shared layout with inet_timewait_sock
  *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  *	@sk_lock:	synchronizer
  *	@sk_kern_sock: True if sock is using kernel lock classes
  *	@sk_rcvbuf: size of receive buffer in bytes
  *	@sk_wq: sock wait queue and async head
  *	@sk_rx_dst: receive input route used by early demux
  *	@sk_dst_cache: destination cache
  *	@sk_dst_pending_confirm: need to confirm neighbour
  *	@sk_policy: flow policy
  *	@sk_rx_skb_cache: cache copy of recently accessed RX skb
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_tsq_flags: TCP Small Queues flags
  *	@sk_write_queue: Packet sending queue
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
  *	@sk_napi_id: id of the last napi context to receive data for sk
  *	@sk_ll_usec: usecs to busypoll when there is no data
  *	@sk_allocation: allocation mode
  *	@sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
  *	@sk_pacing_status: Pacing status (requested, handled by sch_fq)
  *	@sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
  *	@sk_sndbuf: size of send buffer in bytes
  *	@__sk_flags_offset: empty field used to determine location of bitfield
  *	@sk_padding: unused element for alignment
  *	@sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
  *	@sk_no_check_rx: allow zero checksum in RX packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
  *	@sk_route_forced_caps: static, forced route capabilities
  *		(set in tcp_init_sock())
  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_gso_max_size: Maximum GSO segment size to build
  *	@sk_gso_max_segs: Maximum number of GSO segments
  *	@sk_pacing_shift: scaling factor for TCP Small Queues
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
  *	@sk_error_queue: rarely used
  *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
  *			  IPV6_ADDRFORM for instance)
  *	@sk_err: last error
  *	@sk_err_soft: errors that don't cause failure but are the cause of a
  *		      persistent failure not just 'timed out'
  *	@sk_drops: raw/udp drops counter
  *	@sk_ack_backlog: current listen backlog
  *	@sk_max_ack_backlog: listen backlog set in listen()
  *	@sk_uid: user id of owner
  *	@sk_priority: %SO_PRIORITY setting
  *	@sk_type: socket type (%SOCK_STREAM, etc)
  *	@sk_protocol: which protocol this socket belongs in this network family
  *	@sk_peer_pid: &struct pid for this socket's peer
  *	@sk_peer_cred: %SO_PEERCRED setting
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
  *	@sk_txhash: computed flow hash for use on transmit
  *	@sk_filter: socket filtering instructions
  *	@sk_timer: sock cleanup timer
  *	@sk_stamp: time stamp of last packet received
  *	@sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
  *	@sk_tsflags: SO_TIMESTAMPING socket options
  *	@sk_tskey: counter to disambiguate concurrent tstamp requests
  *	@sk_zckey: counter to order MSG_ZEROCOPY notifications
  *	@sk_socket: Identd and reporting IO signals
  *	@sk_user_data: RPC layer private data
  *	@sk_frag: cached page frag
  *	@sk_peek_off: current peek_offset value
  *	@sk_send_head: front of stuff to transmit
  *	@tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
  *	@sk_tx_skb_cache: cache copy of recently accessed TX skb
  *	@sk_security: used by security modules
  *	@sk_mark: generic packet mark
  *	@sk_cgrp_data: cgroup data for this cgroup
  *	@sk_memcg: this socket's memory cgroup association
  *	@sk_write_pending: a write to stream socket waits to start
  *	@sk_state_change: callback to indicate change in the state of the sock
  *	@sk_data_ready: callback to indicate there is data to be processed
  *	@sk_write_space: callback to indicate there is bf sending space available
  *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *	@sk_backlog_rcv: callback to process the backlog
  *	@sk_validate_xmit_skb: ptr to an optional validate function
  *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
  *	@sk_reuseport_cb: reuseport group container
  *	@sk_bpf_storage: ptr to cache and control for bpf_sk_storage
  *	@sk_rcu: used during RCU grace period
  *	@sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
  *	@sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
  *	@sk_txtime_report_errors: set report errors mode for SO_TXTIME
  *	@sk_txtime_unused: unused txtime flags
  */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add nothing before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
#ifdef CONFIG_XPS
#define sk_rx_queue_mapping	__sk_common.skc_rx_queue_mapping
#endif

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_portpair		__sk_common.skc_portpair
#define sk_num			__sk_common.skc_num
#define sk_dport		__sk_common.skc_dport
#define sk_addrpair		__sk_common.skc_addrpair
#define sk_daddr		__sk_common.skc_daddr
#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_reuseport		__sk_common.skc_reuseport
#define sk_ipv6only		__sk_common.skc_ipv6only
#define sk_net_refcnt		__sk_common.skc_net_refcnt
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
#define sk_v6_daddr		__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr	__sk_common.skc_v6_rcv_saddr
#define sk_cookie		__sk_common.skc_cookie
#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
#define sk_flags		__sk_common.skc_flags
#define sk_rxhash		__sk_common.skc_rxhash

	socket_lock_t		sk_lock;
	atomic_t		sk_drops;
	int			sk_rcvlowat;
	struct sk_buff_head	sk_error_queue;
	struct sk_buff		*sk_rx_skb_cache;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case it's implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because its logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	/* ===== mostly read cache line ===== */
	unsigned int		sk_napi_id;
#endif
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		/* private: */
		struct socket_wq	*sk_wq_raw;
		/* public: */
	};
#ifdef CONFIG_XFRM
	struct xfrm_policy __rcu *sk_policy[2];
#endif
	struct dst_entry	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;

	/* ===== cache line for TX ===== */
	int			sk_wmem_queued;
	refcount_t		sk_wmem_alloc;
	unsigned long		sk_tsq_flags;
	union {
		struct sk_buff	*sk_send_head;
		struct rb_root	tcp_rtx_queue;
	};
	struct sk_buff		*sk_tx_skb_cache;
	struct sk_buff_head	sk_write_queue;
	__s32			sk_peek_off;
	int			sk_write_pending;
	__u32			sk_dst_pending_confirm;
	u32			sk_pacing_status; /* see enum sk_pacing */
	long			sk_sndtimeo;
	struct timer_list	sk_timer;
	__u32			sk_priority;
	__u32			sk_mark;
	unsigned long		sk_pacing_rate; /* bytes per second */
	unsigned long		sk_max_pacing_rate;
	struct page_frag	sk_frag;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	netdev_features_t	sk_route_forced_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	gfp_t			sk_allocation;
	__u32			sk_txhash;

	/*
	 * Because of non atomicity rules, all
	 * changes are protected by socket lock.
	 */
	u8			sk_padding : 1,
				sk_kern_sock : 1,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
				sk_userlocks : 4;
	u8			sk_pacing_shift;
	u16			sk_type;
	u16			sk_protocol;
	u16			sk_gso_max_segs;
	unsigned long	        sk_lingertime;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	u32			sk_ack_backlog;
	u32			sk_max_ack_backlog;
	kuid_t			sk_uid;
#if IS_ENABLED(CONFIG_DEBUG_SPINLOCK) || IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC)
	spinlock_t		sk_peer_lock;
#else
	/* sk_peer_lock is in the ANDROID_KABI_RESERVE(1) field below */
#endif
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;

	long			sk_rcvtimeo;
	ktime_t			sk_stamp;
#if BITS_PER_LONG==32
	seqlock_t		sk_stamp_seq;
#endif
	u16			sk_tsflags;
	u8			sk_shutdown;
	u32			sk_tskey;
	atomic_t		sk_zckey;

	u8			sk_clockid;
	u8			sk_txtime_deadline_mode : 1,
				sk_txtime_report_errors : 1,
				sk_txtime_unused : 6;

	struct socket		*sk_socket;
	void			*sk_user_data;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	struct sock_cgroup_data	sk_cgrp_data;
	struct mem_cgroup	*sk_memcg;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sk_buff*		(*sk_validate_xmit_skb)(struct sock *sk,
							struct net_device *dev,
							struct sk_buff *skb);
#endif
	void                    (*sk_destruct)(struct sock *sk);
	struct sock_reuseport __rcu	*sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
	struct bpf_local_storage __rcu	*sk_bpf_storage;
#endif
	struct rcu_head		sk_rcu;

#if IS_ENABLED(CONFIG_DEBUG_SPINLOCK) || IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC)
	ANDROID_KABI_RESERVE(1);
#else
	ANDROID_KABI_USE(1, spinlock_t sk_peer_lock);
#endif
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
	ANDROID_KABI_RESERVE(5);
	ANDROID_KABI_RESERVE(6);
	ANDROID_KABI_RESERVE(7);
	ANDROID_KABI_RESERVE(8);

	ANDROID_OEM_DATA(1);
};

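/* Illustrative sketch, not part of the upstream header: the sk_* defines
 * above simply forward into __sk_common, so struct sock users keep the
 * short names while struct inet_timewait_sock reads the very same storage
 * through its own skc_* aliases. The helper name below is made up for the
 * example.
 */
static inline __be32 sk_daddr_alias_example(const struct sock *sk)
{
	return sk->sk_daddr;	/* expands to sk->__sk_common.skc_daddr */
}
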
enum sk_pacing {
	SK_PACING_NONE		= 0,
	SK_PACING_NEEDED	= 1,
	SK_PACING_FQ		= 2,
};

/* Pointer stored in sk_user_data might not be suitable for copying
 * when cloning the socket. For instance, it can point to a reference
 * counted object. sk_user_data bottom bit is set if pointer must not
 * be copied.
 */
#define SK_USER_DATA_NOCOPY	1UL
#define SK_USER_DATA_BPF	2UL	/* Managed by BPF */
#define SK_USER_DATA_PTRMASK	~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)

/**
 * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
 * @sk: socket
 */
static inline bool sk_user_data_is_nocopy(const struct sock *sk)
{
	return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
}

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

#define rcu_dereference_sk_user_data(sk)				\
({									\
	void *__tmp = rcu_dereference(__sk_user_data((sk)));		\
	(void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK);		\
})
#define rcu_assign_sk_user_data(sk, ptr)				\
({									\
	uintptr_t __tmp = (uintptr_t)(ptr);				\
	WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);			\
	rcu_assign_pointer(__sk_user_data((sk)), __tmp);		\
})
#define rcu_assign_sk_user_data_nocopy(sk, ptr)				\
({									\
	uintptr_t __tmp = (uintptr_t)(ptr);				\
	WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);			\
	rcu_assign_pointer(__sk_user_data((sk)),			\
			   __tmp | SK_USER_DATA_NOCOPY);		\
})

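/* Illustrative sketch, not part of the upstream header: storing a private
 * pointer with the NOCOPY tag and reading it back. The dereference macro
 * masks the flag bits out, so readers always see the untagged pointer,
 * while sk_user_data_is_nocopy() tells the socket clone path to skip the
 * field. The helper name below is made up for the example.
 */
static inline void sk_user_data_nocopy_example(struct sock *sk, void *priv)
{
	rcu_assign_sk_user_data_nocopy(sk, priv);	/* low bit now set */

	rcu_read_lock();
	WARN_ON(rcu_dereference_sk_user_data(sk) != priv); /* tag masked off */
	rcu_read_unlock();

	WARN_ON(!sk_user_data_is_nocopy(sk));
}
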
/*
 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
 * or not whether his port will be reused by someone else. SK_FORCE_REUSE
 * on a socket means that the socket will reuse everybody else's port
 * without looking at the other's sk_reuse value.
 */

#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2

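/* Illustrative example, not part of the upstream header: sock_setsockopt()
 * maps the SO_REUSEADDR boolean onto the first two values, roughly
 *
 *	sk->sk_reuse = valbool ? SK_CAN_REUSE : SK_NO_REUSE;
 *
 * whereas a few kernel-internal sockets set SK_FORCE_REUSE directly so that
 * their bind ignores the sk_reuse value of whoever already holds the port.
 */
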
int sk_set_peek_off(struct sock *sk, int val);

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if (unlikely(flags & MSG_PEEK)) {
		return READ_ONCE(sk->sk_peek_off);
	}

	return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	s32 off = READ_ONCE(sk->sk_peek_off);

	if (unlikely(off >= 0)) {
		off = max_t(s32, off - val, 0);
		WRITE_ONCE(sk->sk_peek_off, off);
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	sk_peek_offset_bwd(sk, -val);
}

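/* Illustrative sketch, not part of the upstream header: how a datagram
 * recvmsg() implementation with SO_PEEK_OFF support typically drives the
 * three helpers above. The helper name and the "copied" parameter are made
 * up for the example.
 */
static inline void sk_peek_offset_usage_example(struct sock *sk, int flags,
						int copied)
{
	/* Bytes to skip before copying; non-zero only for MSG_PEEK readers
	 * on sockets where SO_PEEK_OFF enabled the offset (sk_peek_off >= 0).
	 */
	int off = sk_peek_offset(sk, flags);

	if (flags & MSG_PEEK)
		sk_peek_offset_fwd(sk, copied);	/* data stays queued, move on */
	else
		sk_peek_offset_bwd(sk, copied);	/* data left the queue, rewind */

	(void)off;
}
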
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631)  * Hashed lists helper routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) static inline struct sock *sk_entry(const struct hlist_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	return hlist_entry(node, struct sock, sk_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) static inline struct sock *__sk_head(const struct hlist_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	return hlist_entry(head->first, struct sock, sk_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) static inline struct sock *sk_head(const struct hlist_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	return hlist_empty(head) ? NULL : __sk_head(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) static inline struct sock *sk_next(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) static inline struct sock *sk_nulls_next(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		hlist_nulls_entry(sk->sk_nulls_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 				  struct sock, sk_nulls_node) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) static inline bool sk_unhashed(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	return hlist_unhashed(&sk->sk_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) static inline bool sk_hashed(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	return !sk_unhashed(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) static inline void sk_node_init(struct hlist_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	node->pprev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	node->pprev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) static inline void __sk_del_node(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	__hlist_del(&sk->sk_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) /* NB: equivalent to hlist_del_init_rcu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) static inline bool __sk_del_node_init(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (sk_hashed(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		__sk_del_node(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		sk_node_init(&sk->sk_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) /* Grab socket reference count. This operation is valid only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708)    when sk is ALREADY grabbed f.e. it is found in hash table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709)    or a list and the lookup is made under lock preventing hash table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710)    modifications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) static __always_inline void sock_hold(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	refcount_inc(&sk->sk_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) /* Ungrab socket in the context, which assumes that socket refcnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719)    cannot hit zero, f.e. it is true in context of any socketcall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) static __always_inline void __sock_put(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	refcount_dec(&sk->sk_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) }
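
/* Illustrative example, not part of the original header: the lookup pattern the
 * two comments above describe.  The bucket list and its lock are hypothetical;
 * the point is that sock_hold() is only legal while something else (here the
 * bucket lock) already pins @sk in place.
 */
static inline struct sock *example_hold_first(struct hlist_head *bucket,
					      spinlock_t *bucket_lock)
{
	struct sock *sk;

	spin_lock(bucket_lock);
	sk = hlist_entry_safe(bucket->first, struct sock, sk_node);
	if (sk)
		sock_hold(sk);	/* valid: bucket_lock keeps sk in the table */
	spin_unlock(bucket_lock);

	return sk;		/* caller must eventually drop the ref with sock_put() */
}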
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) static inline bool sk_del_node_init(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	bool rc = __sk_del_node_init(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		/* paranoid for a while -acme */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		__sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) #define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	if (sk_hashed(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	bool rc = __sk_nulls_del_node_init_rcu(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		/* paranoid for a while -acme */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		__sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	hlist_add_head(&sk->sk_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	sock_hold(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	__sk_add_node(sk, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	sock_hold(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	    sk->sk_family == AF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		hlist_add_tail_rcu(&sk->sk_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		hlist_add_head_rcu(&sk->sk_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	sock_hold(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	hlist_add_tail_rcu(&sk->sk_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	sock_hold(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	__sk_nulls_add_node_rcu(sk, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) static inline void __sk_del_bind_node(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	__hlist_del(&sk->sk_bind_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) static inline void sk_add_bind_node(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 					struct hlist_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	hlist_add_head(&sk->sk_bind_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) #define sk_for_each(__sk, list) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	hlist_for_each_entry(__sk, list, sk_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) #define sk_for_each_rcu(__sk, list) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	hlist_for_each_entry_rcu(__sk, list, sk_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) #define sk_nulls_for_each(__sk, node, list) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) #define sk_nulls_for_each_rcu(__sk, node, list) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) #define sk_for_each_from(__sk) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	hlist_for_each_entry_from(__sk, sk_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) #define sk_nulls_for_each_from(__sk, node) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) #define sk_for_each_safe(__sk, tmp, list) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) #define sk_for_each_bound(__sk, list) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	hlist_for_each_entry(__sk, list, sk_bind_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  * @tpos:	the type * to use as a loop cursor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  * @pos:	the &struct hlist_node to use as a loop cursor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * @head:	the head for your list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * @offset:	offset of hlist_node within the struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) #define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	for (pos = rcu_dereference(hlist_first_rcu(head));		       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	     pos != NULL &&						       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	     pos = rcu_dereference(hlist_next_rcu(pos)))
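
/* Illustrative example, not part of the original header: walking a socket
 * chain with the iterators above.  The chain head and helper name are
 * hypothetical; only the sk_for_each_rcu() pattern itself comes from this
 * header.
 */
static inline unsigned int example_count_chain_rcu(struct hlist_head *chain)
{
	struct sock *sk;
	unsigned int n = 0;

	rcu_read_lock();
	sk_for_each_rcu(sk, chain)	/* expands to hlist_for_each_entry_rcu() */
		n++;
	rcu_read_unlock();

	return n;
}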
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) static inline struct user_namespace *sk_user_ns(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	/* Careful: only use this in a context where these parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	 * cannot change and are all valid, such as recvmsg from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	 * userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	return sk->sk_socket->file->f_cred->user_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) /* Sock flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) enum sock_flags {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	SOCK_DEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	SOCK_DONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	SOCK_URGINLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	SOCK_KEEPOPEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	SOCK_LINGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	SOCK_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	SOCK_BROADCAST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	SOCK_TIMESTAMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	SOCK_ZAPPED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	SOCK_DBG, /* %SO_DEBUG setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	SOCK_FASYNC, /* fasync() active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	SOCK_RXQ_OVFL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	SOCK_ZEROCOPY, /* buffers from userspace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	SOCK_WIFI_STATUS, /* push wifi status to userspace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		     * Will use last 4 bytes of packet sent from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		     * user-space instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		     */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	SOCK_TXTIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	SOCK_XDP, /* XDP is attached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	nsk->sk_flags = osk->sk_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	__set_bit(flag, &sk->sk_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	__clear_bit(flag, &sk->sk_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 				     int valbool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if (valbool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		sock_set_flag(sk, bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		sock_reset_flag(sk, bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	return test_bit(flag, &sk->sk_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) }
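
/* Illustrative example, not part of the original header: how a
 * setsockopt()-style path typically uses the flag helpers above.  The
 * function name is hypothetical; SOCK_BROADCAST, sock_valbool_flag() and
 * sock_flag() are the real ones, and the SO_BROADCAST handling in
 * net/core/sock.c does the equivalent under lock_sock().
 */
static inline bool example_set_broadcast(struct sock *sk, int valbool)
{
	sock_valbool_flag(sk, SOCK_BROADCAST, valbool);

	/* readers simply test the bit */
	return sock_flag(sk, SOCK_BROADCAST);
}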
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) #ifdef CONFIG_NET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) static inline int sk_memalloc_socks(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	return static_branch_unlikely(&memalloc_socks_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) void __receive_sock(struct file *file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) static inline int sk_memalloc_socks(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) static inline void __receive_sock(struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) static inline void sk_acceptq_removed(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) static inline void sk_acceptq_added(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) static inline bool sk_acceptq_is_full(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  * Compute minimal free write space needed to queue new packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static inline int sk_stream_min_wspace(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	return READ_ONCE(sk->sk_wmem_queued) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) static inline int sk_stream_wspace(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) }
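
/* Worked example (illustrative figures, not part of the original header):
 * with sk_sndbuf = 64 KB and sk_wmem_queued = 40 KB, sk_stream_wspace() is
 * 24 KB while sk_stream_min_wspace() is 20 KB, so the stream still counts
 * as writeable; once more than about 42.6 KB (two thirds of sk_sndbuf) is
 * queued, the two cross and writers are throttled.
 */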
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) static inline void sk_wmem_queued_add(struct sock *sk, int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) void sk_stream_write_space(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) /* OOB backlog add */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	/* make sure the skb dst is refcounted: we are about to leave the RCU lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	skb_dst_force(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	if (!sk->sk_backlog.tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		WRITE_ONCE(sk->sk_backlog.head, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		sk->sk_backlog.tail->next = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	WRITE_ONCE(sk->sk_backlog.tail, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	skb->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * Take into account the size of the receive queue and the backlog queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * Do not take this skb's truesize into account, so that even a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  * big packet is still allowed to come in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	return qsize > limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /* The per-socket spinlock must be held here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 					      unsigned int limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	if (sk_rcvqueues_full(sk, limit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	 * If the skb was allocated from pfmemalloc reserves, only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	 * allow SOCK_MEMALLOC sockets to use it as this socket is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	 * helping free memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	__sk_add_backlog(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	sk->sk_backlog.len += skb->truesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
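
/* Illustrative example, not part of the original header: the caller side of
 * sk_add_backlog() as it might look in a protocol's RX path (a .c file).
 * The helper name and exact limit are hypothetical; the real RX code first
 * checks sock_owned_by_user() under the same lock and only falls back to
 * the backlog when the socket is owned.  This runs in BH context, so a
 * plain spin_lock() on the per-socket spinlock is sufficient (that is what
 * bh_lock_sock(), defined later in this header, expands to).
 */
static inline void example_backlog_enqueue(struct sock *sk, struct sk_buff *skb)
{
	unsigned int limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);

	spin_lock(&sk->sk_lock.slock);
	if (sk_add_backlog(sk, skb, limit))
		kfree_skb(skb);			/* -ENOBUFS or -ENOMEM: drop */
	spin_unlock(&sk->sk_lock.slock);
}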
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		return __sk_backlog_rcv(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	return sk->sk_backlog_rcv(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static inline void sk_incoming_cpu_update(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	int cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		WRITE_ONCE(sk->sk_incoming_cpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static inline void sock_rps_record_flow_hash(__u32 hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) #ifdef CONFIG_RPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	struct rps_sock_flow_table *sock_flow_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	rps_record_sock_flow(sock_flow_table, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static inline void sock_rps_record_flow(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) #ifdef CONFIG_RPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	if (static_branch_unlikely(&rfs_needed)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		/* Reading sk->sk_rxhash might incur an expensive cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		 * miss.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		 * TCP_ESTABLISHED does cover almost all states where RFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		 * might be useful, and is cheaper [1] than testing :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		 *	IPv4: inet_sk(sk)->inet_daddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		 * 	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		 * OR	an additional socket flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		 * [1] : sk_state and sk_prot are in the same cache line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		if (sk->sk_state == TCP_ESTABLISHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			sock_rps_record_flow_hash(sk->sk_rxhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
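
/* Illustrative example, not part of the original header: protocols call
 * sock_rps_record_flow() from their recvmsg/poll paths so that RFS can
 * steer the flow's packets towards the CPU that is consuming them.  The
 * wrapper name is hypothetical; inet_recvmsg() does the equivalent for
 * IPv4 sockets.
 */
static inline void example_recvmsg_prologue(struct sock *sk)
{
	sock_rps_record_flow(sk);	/* no-op unless CONFIG_RPS and RFS are in use */
}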
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static inline void sock_rps_save_rxhash(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 					const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) #ifdef CONFIG_RPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	if (unlikely(sk->sk_rxhash != skb->hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		sk->sk_rxhash = skb->hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static inline void sock_rps_reset_rxhash(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) #ifdef CONFIG_RPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	sk->sk_rxhash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) #define sk_wait_event(__sk, __timeo, __condition, __wait)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	({	int __rc;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		release_sock(__sk);					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		__rc = __condition;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		if (!__rc) {						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			*(__timeo) = wait_woken(__wait,			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 						TASK_INTERRUPTIBLE,	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 						*(__timeo));		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		}							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		sched_annotate_sleep();					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		lock_sock(__sk);					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		__rc = __condition;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		__rc;							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	})
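
/* Illustrative example, not part of the original header: a typical user of
 * sk_wait_event() as it would appear in a .c file, waiting (with the socket
 * lock held by the caller, since the macro drops and re-takes it) until the
 * receive queue is non-empty or the timeout expires.  The helper name is
 * hypothetical; sk_wait_data() and net/core/stream.c follow this pattern.
 */
static inline int example_wait_for_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int rc;

	add_wait_queue(sk_sleep(sk), &wait);
	rc = sk_wait_event(sk, timeo,
			   !skb_queue_empty_lockless(&sk->sk_receive_queue),
			   &wait);
	remove_wait_queue(sk_sleep(sk), &wait);

	return rc;	/* non-zero once the condition holds */
}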
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) void sk_stream_wait_close(struct sock *sk, long timeo_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) int sk_stream_error(struct sock *sk, int flags, int err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) void sk_stream_kill_queues(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) void sk_set_memalloc(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) void sk_clear_memalloc(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) void __sk_flush_backlog(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static inline bool sk_flush_backlog(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		__sk_flush_backlog(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct request_sock_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct timewait_sock_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct inet_hashinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct raw_hashinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct smc_hashinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  * caches using SLAB_TYPESAFE_BY_RCU should leave the .next pointer of nulls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  * nodes unmodified. Special care is taken when initializing the object to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static inline void sk_prot_clear_nulls(struct sock *sk, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	if (offsetof(struct sock, sk_node.next) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		memset(sk, 0, offsetof(struct sock, sk_node.next));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	memset(&sk->sk_node.pprev, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	       size - offsetof(struct sock, sk_node.pprev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /* Networking protocol blocks we attach to sockets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  * socket layer -> transport layer interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct proto {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	void			(*close)(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 					long timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	int			(*pre_connect)(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 					struct sockaddr *uaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 					int addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	int			(*connect)(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 					struct sockaddr *uaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 					int addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	int			(*disconnect)(struct sock *sk, int flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	struct sock *		(*accept)(struct sock *sk, int flags, int *err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 					  bool kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	int			(*ioctl)(struct sock *sk, int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 					 unsigned long arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	int			(*init)(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	void			(*destroy)(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	void			(*shutdown)(struct sock *sk, int how);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	int			(*setsockopt)(struct sock *sk, int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 					int optname, sockptr_t optval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 					unsigned int optlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	int			(*getsockopt)(struct sock *sk, int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 					int optname, char __user *optval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 					int __user *option);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	void			(*keepalive)(struct sock *sk, int valbool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	int			(*compat_ioctl)(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 					unsigned int cmd, unsigned long arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 					   size_t len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 					   size_t len, int noblock, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 					   int *addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	int			(*sendpage)(struct sock *sk, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 					int offset, size_t size, int flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	int			(*bind)(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 					struct sockaddr *addr, int addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	int			(*bind_add)(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 					struct sockaddr *addr, int addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	int			(*backlog_rcv) (struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 						struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	void		(*release_cb)(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	/* Keeping track of sk's, looking them up, and port selection methods. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	int			(*hash)(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	void			(*unhash)(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	void			(*rehash)(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	int			(*get_port)(struct sock *sk, unsigned short snum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	/* Keeping track of sockets in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	unsigned int		inuse_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	bool			(*stream_memory_free)(const struct sock *sk, int wake);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	bool			(*stream_memory_read)(const struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	/* Memory pressure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	void			(*enter_memory_pressure)(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	void			(*leave_memory_pressure)(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	 * Pressure flag: try to collapse.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	 * Technical note: it is used by multiple contexts non-atomically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	 * All of __sk_mem_schedule() is of this nature: accounting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	 * is strict, actions are advisory and have some latency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	unsigned long		*memory_pressure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	long			*sysctl_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	int			*sysctl_wmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	int			*sysctl_rmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	u32			sysctl_wmem_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	u32			sysctl_rmem_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	int			max_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	bool			no_autobind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	struct kmem_cache	*slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	unsigned int		obj_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	slab_flags_t		slab_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	unsigned int		useroffset;	/* Usercopy region offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	unsigned int		usersize;	/* Usercopy region size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	struct percpu_counter	*orphan_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	struct request_sock_ops	*rsk_prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	struct timewait_sock_ops *twsk_prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		struct inet_hashinfo	*hashinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		struct udp_table	*udp_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		struct raw_hashinfo	*raw_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		struct smc_hashinfo	*smc_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	} h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	struct module		*owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	char			name[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	struct list_head	node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) #ifdef SOCK_REFCNT_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	atomic_t		socks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	int			(*diag_destroy)(struct sock *sk, int err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) } __randomize_layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) int proto_register(struct proto *prot, int alloc_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) void proto_unregister(struct proto *prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) int sock_load_diag_module(int family, int protocol);
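
/* Illustrative example, not part of the original header: the minimal shape
 * of a protocol block and its registration, as a protocol module's .c file
 * (which also includes <linux/module.h>) would do it.  All "example_*"
 * names are hypothetical; struct proto, proto_register() and
 * proto_unregister() are the real interface declared above.
 */
struct example_sock {
	struct sock	sk;		/* must come first: obj_size covers it */
	int		example_state;	/* protocol-private state */
};

static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
};

static int __init example_proto_init(void)
{
	return proto_register(&example_proto, 1);	/* 1: allocate a slab cache */
}

static void __exit example_proto_exit(void)
{
	proto_unregister(&example_proto);
}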
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) #ifdef SOCK_REFCNT_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static inline void sk_refcnt_debug_inc(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	atomic_inc(&sk->sk_prot->socks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static inline void sk_refcnt_debug_dec(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	atomic_dec(&sk->sk_prot->socks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static inline void sk_refcnt_debug_release(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	if (refcount_read(&sk->sk_refcnt) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		       sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) #else /* SOCK_REFCNT_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) #define sk_refcnt_debug_inc(sk) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) #define sk_refcnt_debug_dec(sk) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) #define sk_refcnt_debug_release(sk) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) #endif /* SOCK_REFCNT_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	return sk->sk_prot->stream_memory_free ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		sk->sk_prot->stream_memory_free(sk, wake) : true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static inline bool sk_stream_memory_free(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	return __sk_stream_memory_free(sk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	       __sk_stream_memory_free(sk, wake);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static inline bool sk_stream_is_writeable(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	return __sk_stream_is_writeable(sk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static inline int sk_under_cgroup_hierarchy(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 					    struct cgroup *ancestor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) #ifdef CONFIG_SOCK_CGROUP_DATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 				    ancestor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) static inline bool sk_has_memory_pressure(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	return sk->sk_prot->memory_pressure != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) static inline bool sk_under_memory_pressure(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	if (!sk->sk_prot->memory_pressure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	return !!*sk->sk_prot->memory_pressure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) static inline long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) sk_memory_allocated(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	return atomic_long_read(sk->sk_prot->memory_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static inline long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) sk_memory_allocated_add(struct sock *sk, int amt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) sk_memory_allocated_sub(struct sock *sk, int amt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static inline void sk_sockets_allocated_dec(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	percpu_counter_dec(sk->sk_prot->sockets_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) static inline void sk_sockets_allocated_inc(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	percpu_counter_inc(sk->sk_prot->sockets_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static inline u64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) sk_sockets_allocated_read_positive(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) proto_sockets_allocated_sum_positive(struct proto *prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	return percpu_counter_sum_positive(prot->sockets_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static inline long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) proto_memory_allocated(struct proto *prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	return atomic_long_read(prot->memory_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) proto_memory_pressure(struct proto *prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	if (!prot->memory_pressure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	return !!*prot->memory_pressure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /* Called with local bh disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) int sock_prot_inuse_get(struct net *net, struct proto *proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) int sock_inuse_get(struct net *net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		int inc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /* With per-bucket locks this operation is not atomic, so this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)  * version is no worse.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) static inline int __sk_prot_rehash(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	sk->sk_prot->unhash(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	return sk->sk_prot->hash(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* About 10 seconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) #define SOCK_DESTROY_TIME (10*HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /* Ports 0-1023 can't be bound to unless you are superuser */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) #define PROT_SOCK	1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) #define SHUTDOWN_MASK	3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) #define RCV_SHUTDOWN	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) #define SEND_SHUTDOWN	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) #define SOCK_SNDBUF_LOCK	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) #define SOCK_RCVBUF_LOCK	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) #define SOCK_BINDADDR_LOCK	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) #define SOCK_BINDPORT_LOCK	8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) struct socket_alloc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	struct socket socket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	struct inode vfs_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static inline struct socket *SOCKET_I(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) static inline struct inode *SOCK_INODE(struct socket *socket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)  * Functions for memory accounting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) int __sk_mem_schedule(struct sock *sk, int size, int kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) void __sk_mem_reduce_allocated(struct sock *sk, int amount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) void __sk_mem_reclaim(struct sock *sk, int amount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) /* We used to have PAGE_SIZE here, but systems with 64KB pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)  * do not necessarily have 16x more memory than 4KB ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) #define SK_MEM_QUANTUM 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) #define SK_MEM_SEND	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) #define SK_MEM_RECV	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /* sysctl_mem values are in pages; we convert them to SK_MEM_QUANTUM units */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static inline long sk_prot_mem_limits(const struct sock *sk, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	long val = sk->sk_prot->sysctl_mem[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) #if PAGE_SIZE > SK_MEM_QUANTUM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) #elif PAGE_SIZE < SK_MEM_QUANTUM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) static inline int sk_mem_pages(int amt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
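
/* Worked example (illustrative figures, not part of the original header):
 * with SK_MEM_QUANTUM = 4096, sk_mem_pages(3000) == 1 and
 * sk_mem_pages(5000) == 2, i.e. amounts are rounded up to whole quanta.
 * On a 64 KB-page kernel, sk_prot_mem_limits() shifts the sysctl page
 * counts left by 4 so the limits stay expressed in the same 4 KB quanta.
 */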
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static inline bool sk_has_account(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	/* return true if protocol supports memory accounting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	return !!sk->sk_prot->memory_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static inline bool sk_wmem_schedule(struct sock *sk, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	if (!sk_has_account(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	return size <= sk->sk_forward_alloc ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		__sk_mem_schedule(sk, size, SK_MEM_SEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	if (!sk_has_account(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	return size <= sk->sk_forward_alloc ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		skb_pfmemalloc(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static inline void sk_mem_reclaim(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (!sk_has_account(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) static inline void sk_mem_reclaim_partial(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	if (!sk_has_account(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static inline void sk_mem_charge(struct sock *sk, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	if (!sk_has_account(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	sk->sk_forward_alloc -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
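
/* Editor's note - illustrative sketch, not part of the original header:
 * a typical send path first asks sk_wmem_schedule() whether the byte count
 * fits the forward allocation (raising it if needed), and only then charges
 * it with sk_mem_charge(). The function name is hypothetical and error
 * handling is reduced to a single -ENOBUFS.
 */
static inline int sk_example_charge_for_send(struct sock *sk,
					     struct sk_buff *skb)
{
	if (!sk_wmem_schedule(sk, skb->truesize))
		return -ENOBUFS;		/* over the protocol memory limits */

	sk_mem_charge(sk, skb->truesize);	/* consume forward allocation */
	sk_wmem_queued_add(sk, skb->truesize);	/* account queued bytes */
	return 0;
}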
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static inline void sk_mem_uncharge(struct sock *sk, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	if (!sk_has_account(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	sk->sk_forward_alloc += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	/* Avoid a possible overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	 * TCP send queues can make this happen, if sk_mem_reclaim()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	 * is not called and more than 2 GBytes are released at once.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	 * If we reach 2 MBytes, reclaim 1 MByte right now; there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	 * no need to hold that much forward allocation anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		__sk_mem_reclaim(sk, 1 << 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	sk_wmem_queued_add(sk, -skb->truesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	sk_mem_uncharge(sk, skb->truesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	    !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		skb_ext_reset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		skb_zcopy_clear(skb, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		sk->sk_tx_skb_cache = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	__kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) static inline void sock_release_ownership(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	if (sk->sk_lock.owned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		sk->sk_lock.owned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		/* The sk_lock has mutex_unlock() semantics: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)  * Macro so as not to evaluate some arguments when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)  * lockdep is not enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  * Mark both the sk_lock and the sk_lock.slock as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  * per-address-family lock class.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) #define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) do {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	sk->sk_lock.owned = 0;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	init_waitqueue_head(&sk->sk_lock.wq);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	spin_lock_init(&(sk)->sk_lock.slock);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 			sizeof((sk)->sk_lock));				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 				(skey), (sname));				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) #ifdef CONFIG_LOCKDEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static inline bool lockdep_sock_is_held(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	return lockdep_is_held(&sk->sk_lock) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	       lockdep_is_held(&sk->sk_lock.slock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) void lock_sock_nested(struct sock *sk, int subclass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) static inline void lock_sock(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	lock_sock_nested(sk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) void __release_sock(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) void release_sock(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* BH context may only use the following locking interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) #define bh_lock_sock_nested(__sk) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 				spin_lock_nested(&((__sk)->sk_lock.slock), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 				SINGLE_DEPTH_NESTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) bool lock_sock_fast(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)  * unlock_sock_fast - complement of lock_sock_fast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)  * @slow: slow mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)  * Fast unlock of the socket for user context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)  * If slow mode is on, we call the regular release_sock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) static inline void unlock_sock_fast(struct sock *sk, bool slow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	if (slow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		spin_unlock_bh(&sk->sk_lock.slock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
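
/* Editor's note - a minimal usage sketch (hypothetical function name): the
 * bool returned by lock_sock_fast() must be handed back to
 * unlock_sock_fast() so the slow path is released correctly.
 */
static inline void sk_example_fast_locked_op(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... short, non-sleeping work on socket state ... */

	unlock_sock_fast(sk, slow);
}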
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) /* Used by processes to "lock" a socket state, so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)  * interrupts and bottom half handlers won't change it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)  * from under us. It essentially blocks any incoming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  * packets, so that we won't get any new data or any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)  * packets that change the state of the socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)  * While locked, BH processing will add new packets to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)  * the backlog queue.  This queue is processed by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)  * owner of the socket lock right before it is released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)  * Since ~2.3.5 it is also an exclusive sleep lock serializing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)  * accesses from user process context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)  */
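
/* Editor's note - illustrative sketch of the pattern described above, with a
 * hypothetical function name: user context takes the socket lock, changes
 * socket state, and release_sock() then runs the backlog that BH context
 * queued in the meantime.
 */
static inline void sk_example_process_context_update(struct sock *sk)
{
	lock_sock(sk);
	/* ... modify socket state; softirq traffic lands on sk->sk_backlog ... */
	release_sock(sk);	/* processes the backlog before unlocking */
}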
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static inline void sock_owned_by_me(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) #ifdef CONFIG_LOCKDEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) static inline bool sock_owned_by_user(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	sock_owned_by_me(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	return sk->sk_lock.owned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	return sk->sk_lock.owned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) /* no reclassification while locks are held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) static inline bool sock_allow_reclassification(const struct sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	struct sock *sk = (struct sock *)csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		      struct proto *prot, int kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) void sk_free(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) void sk_destruct(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) void sk_free_unlock_clone(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			     gfp_t priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) void __sock_wfree(struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) void sock_wfree(struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 			     gfp_t priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) void skb_orphan_partial(struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) void sock_rfree(struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) void sock_efree(struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) #ifdef CONFIG_INET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) void sock_edemux(struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) void sock_pfree(struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) #define sock_edemux sock_efree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) int sock_setsockopt(struct socket *sock, int level, int op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		    sockptr_t optval, unsigned int optlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) int sock_getsockopt(struct socket *sock, int level, int op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		    char __user *optval, int __user *optlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) int sock_gettstamp(struct socket *sock, void __user *userstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		   bool timeval, bool time32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 				    int noblock, int *errcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 				     unsigned long data_len, int noblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 				     int *errcode, int max_page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) void sock_kfree_s(struct sock *sk, void *mem, int size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) void sock_kzfree_s(struct sock *sk, void *mem, int size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) void sk_send_sigurg(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) struct sockcm_cookie {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	u64 transmit_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	u32 mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	u16 tsflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) static inline void sockcm_init(struct sockcm_cookie *sockc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 			       const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		     struct sockcm_cookie *sockc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		   struct sockcm_cookie *sockc);
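
/* Editor's note - illustrative sketch (hypothetical helper): a sendmsg()
 * implementation typically seeds the cookie from the socket defaults with
 * sockcm_init() and then lets sock_cmsg_send() override fields from any
 * control messages supplied by the caller.
 */
static inline void sk_example_init_cookie(struct sock *sk,
					  struct sockcm_cookie *sockc)
{
	sockcm_init(sockc, sk);		/* start from sk->sk_tsflags */
	/* a real sender would now call sock_cmsg_send(sk, msg, sockc)
	 * when the message carries control data */
}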
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)  * Functions to fill in entries in struct proto_ops when a protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)  * does not implement a particular function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) int sock_no_bind(struct socket *, struct sockaddr *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) int sock_no_connect(struct socket *, struct sockaddr *, int, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) int sock_no_socketpair(struct socket *, struct socket *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) int sock_no_accept(struct socket *, struct socket *, int, bool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int sock_no_getname(struct socket *, struct sockaddr *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) int sock_no_listen(struct socket *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) int sock_no_shutdown(struct socket *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) int sock_no_mmap(struct file *file, struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		 struct vm_area_struct *vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 			 size_t size, int flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 				int offset, size_t size, int flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)  * Functions to fill in entries in struct proto_ops when a protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)  * uses the inet style.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) int sock_common_getsockopt(struct socket *sock, int level, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 				  char __user *optval, int __user *optlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 			int flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) int sock_common_setsockopt(struct socket *sock, int level, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 			   sockptr_t optval, unsigned int optlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) void sk_common_release(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)  *	Default socket callbacks and setup code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) /* Initialise core socket variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) void sock_init_data(struct socket *sock, struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)  * Socket reference counting postulates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)  * * Each user of socket SHOULD hold a reference count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)  * * Each access point to socket (a hash table bucket, reference from a list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)  *   running timer, skb in flight) MUST hold a reference count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)  * * When reference count hits 0, it means it will never increase back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)  * * When reference count hits 0, it means that no references from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)  *   outside exist to this socket and current process on current CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)  *   is last user and may/should destroy this socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)  * * sk_free is called from any context: process, BH, IRQ. When
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)  *   it is called, socket has no references from outside -> sk_free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  *   may release descendant resources allocated by the socket, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)  *   by the time it is called, the socket is NOT referenced by any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)  *   hash tables, lists etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)  * * Packets, delivered from outside (from network or from another process)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)  *   and enqueued on receive/error queues SHOULD NOT grab a reference count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)  *   while they sit in a queue. Otherwise, packets would leak when a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)  *   socket is looked up by one CPU and unhashing is done by another CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)  *   This is true for udp/raw, netlink (leak to receive and error queues), tcp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)  *   (leak to backlog). Packet sockets do all the processing inside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)  *   BR_NETPROTO_LOCK, so they do not have this race condition. UNIX sockets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)  *   use a separate SMP lock, so they are protected as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /* Ungrab socket and destroy it, if it was the last reference. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static inline void sock_put(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	if (refcount_dec_and_test(&sk->sk_refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		sk_free(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
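
/* Editor's note - illustrative sketch (hypothetical function name) of the
 * hold/put discipline the postulates above describe; sock_hold() is defined
 * earlier in this header.
 */
static inline void sk_example_ref_cycle(struct sock *sk)
{
	sock_hold(sk);	/* a timer, hash bucket or in-flight skb owns a ref */
	/* ... use the socket ... */
	sock_put(sk);	/* may free the socket if this was the last reference */
}
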
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) /* Generic version of sock_put(), dealing with all sockets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)  * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) void sock_gen_put(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		     unsigned int trim_cap, bool refcounted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 				 const int nested)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	return __sk_receive_skb(sk, skb, nested, 1, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	/* sk_tx_queue_mapping accepts only up to a 16-bit value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	sk->sk_tx_queue_mapping = tx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) #define NO_QUEUE_MAPPING	USHRT_MAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) static inline void sk_tx_queue_clear(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static inline int sk_tx_queue_get(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		return sk->sk_tx_queue_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) #ifdef CONFIG_XPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	if (skb_rx_queue_recorded(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		u16 rx_queue = skb_get_rx_queue(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		sk->sk_rx_queue_mapping = rx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) static inline void sk_rx_queue_clear(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) #ifdef CONFIG_XPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) #ifdef CONFIG_XPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) static inline int sk_rx_queue_get(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		return sk->sk_rx_queue_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) static inline void sk_set_socket(struct sock *sk, struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	sk->sk_socket = sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static inline wait_queue_head_t *sk_sleep(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	return &rcu_dereference_raw(sk->sk_wq)->wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) /* Detach socket from process context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)  * Announce socket dead, detach it from wait queue and inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)  * Note that the parent inode holds a reference count on this struct sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)  * we do not release it in this function, because the protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)  * probably wants some additional cleanup or may even continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)  * to work with this socket (TCP).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) static inline void sock_orphan(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	write_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	sock_set_flag(sk, SOCK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	sk_set_socket(sk, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	sk->sk_wq  = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	write_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static inline void sock_graft(struct sock *sk, struct socket *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	WARN_ON(parent->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	write_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	rcu_assign_pointer(sk->sk_wq, &parent->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	parent->sk = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	sk_set_socket(sk, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	sk->sk_uid = SOCK_INODE(parent)->i_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	security_sock_graft(sk, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	write_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) kuid_t sock_i_uid(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) unsigned long sock_i_ino(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) static inline u32 net_tx_rndhash(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	u32 v = prandom_u32();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	return v ?: 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) static inline void sk_set_txhash(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	/* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) static inline bool sk_rethink_txhash(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	if (sk->sk_txhash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		sk_set_txhash(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) static inline struct dst_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) __sk_dst_get(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	return rcu_dereference_check(sk->sk_dst_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 				     lockdep_sock_is_held(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) static inline struct dst_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) sk_dst_get(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	dst = rcu_dereference(sk->sk_dst_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	return dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
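
/* Editor's note - illustrative sketch (hypothetical helper): sk_dst_get()
 * returns a referenced route (or NULL), and the caller must drop that
 * reference with dst_release(), which tolerates a NULL argument.
 */
static inline bool sk_example_has_route(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	bool ret = dst != NULL;

	dst_release(dst);
	return ret;
}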
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) static inline void __dst_negative_advice(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	if (dst && dst->ops->negative_advice) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		ndst = dst->ops->negative_advice(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		if (ndst != dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			rcu_assign_pointer(sk->sk_dst_cache, ndst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 			sk_tx_queue_clear(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 			sk->sk_dst_pending_confirm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) static inline void dst_negative_advice(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	sk_rethink_txhash(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	__dst_negative_advice(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) __sk_dst_set(struct sock *sk, struct dst_entry *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	struct dst_entry *old_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	sk_tx_queue_clear(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	sk->sk_dst_pending_confirm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 					    lockdep_sock_is_held(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	rcu_assign_pointer(sk->sk_dst_cache, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	dst_release(old_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) sk_dst_set(struct sock *sk, struct dst_entry *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	struct dst_entry *old_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	sk_tx_queue_clear(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	sk->sk_dst_pending_confirm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	dst_release(old_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) __sk_dst_reset(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	__sk_dst_set(sk, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) sk_dst_reset(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	sk_dst_set(sk, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) static inline void sk_dst_confirm(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	if (!READ_ONCE(sk->sk_dst_pending_confirm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	if (skb_get_dst_pending_confirm(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		struct sock *sk = skb->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		unsigned long now = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		/* avoid dirtying neighbour */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		if (READ_ONCE(n->confirmed) != now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 			WRITE_ONCE(n->confirmed, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) bool sk_mc_loop(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) static inline bool sk_can_gso(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	sk->sk_route_nocaps |= flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	sk->sk_route_caps &= ~flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 					   struct iov_iter *from, char *to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 					   int copy, int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	if (skb->ip_summed == CHECKSUM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		__wsum csum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		skb->csum = csum_block_add(skb->csum, csum, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		if (!copy_from_iter_full_nocache(to, copy, from))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	} else if (!copy_from_iter_full(to, copy, from))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 				       struct iov_iter *from, int copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	int err, offset = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 				       copy, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		__skb_trim(skb, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 					   struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 					   struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 					   int off, int copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 				       copy, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	skb->len	     += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	skb->data_len	     += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	skb->truesize	     += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	sk_wmem_queued_add(sk, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	sk_mem_charge(sk, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)  * sk_wmem_alloc_get - returns write allocations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)  * Return: sk_wmem_alloc minus initial offset of one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) static inline int sk_wmem_alloc_get(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	return refcount_read(&sk->sk_wmem_alloc) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)  * sk_rmem_alloc_get - returns read allocations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)  * Return: sk_rmem_alloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) static inline int sk_rmem_alloc_get(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	return atomic_read(&sk->sk_rmem_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)  * sk_has_allocations - check if allocations are outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)  * Return: true if socket has write or read allocations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) static inline bool sk_has_allocations(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)  * skwq_has_sleeper - check if there are any waiting processes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)  * @wq: struct socket_wq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)  * Return: true if socket_wq has waiting processes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)  * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)  * barrier call. They were added due to the race found within the tcp code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)  * Consider the following tcp code paths::
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)  *   CPU1                CPU2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)  *   sys_select          receive packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)  *   ...                 ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)  *   __add_wait_queue    update tp->rcv_nxt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)  *   ...                 ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)  *   tp->rcv_nxt check   sock_def_readable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)  *   ...                 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)  *   schedule               rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)  *                          wq = rcu_dereference(sk->sk_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)  *                          if (wq && waitqueue_active(&wq->wait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)  *                              wake_up_interruptible(&wq->wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)  *                          ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)  *                       }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)  * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)  * in its cache, and so does the tp->rcv_nxt update on the CPU2 side.  CPU1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)  * could then end up calling schedule() and sleeping forever if there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)  * more data on the socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) static inline bool skwq_has_sleeper(struct socket_wq *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	return wq && wq_has_sleeper(&wq->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
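
/* Editor's note - illustrative sketch of the CPU2 column above (hypothetical
 * function name): the smp_mb() inside wq_has_sleeper() pairs with the
 * barrier in sock_poll_wait(), so a sleeping poller is not missed.
 */
static inline void sk_example_wake_readers(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	rcu_read_unlock();
}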
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)  * sock_poll_wait - place memory barrier behind the poll_wait call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)  * @filp:           file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)  * @sock:           socket to wait on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)  * @p:              poll_table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)  * See the comments in the wq_has_sleeper function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) static inline void sock_poll_wait(struct file *filp, struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 				  poll_table *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	if (!poll_does_not_wait(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		poll_wait(filp, &sock->wq.wait, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		/* We need to be sure we are in sync with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		 * socket flags modification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		 * This memory barrier pairs with the one in wq_has_sleeper().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	/* This pairs with WRITE_ONCE() in sk_set_txhash() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	u32 txhash = READ_ONCE(sk->sk_txhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	if (txhash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		skb->l4_hash = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		skb->hash = txhash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)  *	Queue a received datagram if it will fit. Stream and sequenced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  *	protocols can't normally use this as they need to fit buffers in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)  *	and play with them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)  *	Inlined as it's very short and called for pretty much every
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)  *	packet ever received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	skb_orphan(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	skb->sk = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	skb->destructor = sock_rfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	sk_mem_charge(sk, skb->truesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
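
/* Editor's note - illustrative sketch (hypothetical function name) of the
 * datagram receive pattern the comment above refers to: check the receive
 * budget, then hand the skb to the socket; a real caller would also append
 * the skb to sk->sk_receive_queue.
 */
static inline int sk_example_charge_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)READ_ONCE(sk->sk_rcvbuf))
		return -ENOMEM;

	skb_set_owner_r(skb, sk);	/* adds to sk_rmem_alloc, consumes fwd alloc */
	return 0;
}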
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		skb_orphan(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		skb->destructor = sock_efree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		skb->sk = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) void sk_reset_timer(struct sock *sk, struct timer_list *timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		    unsigned long expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) void sk_stop_timer(struct sock *sk, struct timer_list *timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			struct sk_buff *skb, unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 			void (*destructor)(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 					   struct sk_buff *skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)  *	Recover an error report and clear atomically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) static inline int sock_error(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	/* Avoid an atomic operation for the common case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	 * This is racy since another cpu/thread can change sk_err under us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	if (likely(data_race(!sk->sk_err)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	err = xchg(&sk->sk_err, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	return -err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) static inline unsigned long sock_wspace(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	int amt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		if (amt < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 			amt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	return amt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) /* Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)  *  We use sk->sk_wq_raw only from contexts that know this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)  *  pointer is not NULL and cannot disappear or change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) static inline void sk_set_bit(int nr, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	    !sock_flag(sk, SOCK_FASYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	set_bit(nr, &sk->sk_wq_raw->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) static inline void sk_clear_bit(int nr, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	    !sock_flag(sk, SOCK_FASYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	clear_bit(nr, &sk->sk_wq_raw->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) static inline void sk_wake_async(const struct sock *sk, int how, int band)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	if (sock_flag(sk, SOCK_FASYNC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
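
/*
 * Editor's illustration (not part of the original header): a hedged sketch of
 * the usual SOCKWQ_ASYNC_NOSPACE pattern.  A writer that ran out of send
 * space sets the bit; the write-space callback later clears it and signals
 * SIGIO/POLL_OUT listeners.  Both helper names are hypothetical.
 */
static inline void example_mark_send_nospace(struct sock *sk)
{
	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
}

static inline void example_write_space(struct sock *sk)
{
	/* Space is available again: drop the mark and notify async waiters. */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}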
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)  * need sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)  * Note: for send buffers, TCP works better if we can build two skbs at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)  * minimum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) #define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) #define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) #define SOCK_MIN_RCVBUF		 TCP_SKB_MIN_TRUESIZE
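
/*
 * Editor's note (worked example, not part of the original header): assuming
 * SKB_DATA_ALIGN(sizeof(struct sk_buff)) comes to 256 bytes on a typical
 * 64-bit configuration (the exact value is config dependent), the minimums
 * above work out to roughly:
 *
 *	TCP_SKB_MIN_TRUESIZE ~= 2048 + 256 = 2304 bytes
 *	SOCK_MIN_SNDBUF      ~= 2 * 2304  = 4608 bytes  (two minimal skbs)
 *	SOCK_MIN_RCVBUF      ~= 2304 bytes              (one minimal skb)
 */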
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) static inline void sk_stream_moderate_sndbuf(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 				    bool force_schedule);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)  * sk_page_frag - return an appropriate page_frag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)  * Use the per task page_frag instead of the per socket one for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)  * optimization when we know that we're in process context and own
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)  * everything that's associated with %current.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)  * Both direct reclaim and page faults can nest inside other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)  * socket operations and end up recursing into sk_page_frag()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)  * while it's already in use: explicitly avoid task page_frag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)  * usage if the caller is potentially doing any of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)  * This assumes that page fault handlers use the GFP_NOFS flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)  * Return: a per task page_frag if context allows that,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)  * otherwise a per socket one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) static inline struct page_frag *sk_page_frag(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	    (__GFP_DIRECT_RECLAIM | __GFP_FS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		return &current->task_frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	return &sk->sk_frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
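
/*
 * Editor's illustration (not part of the original header): a hedged sketch of
 * how a sendmsg-style path might combine sk_page_frag() with
 * sk_page_frag_refill() to copy user data into a page fragment.  The name
 * example_copy_to_page_frag() is hypothetical and error handling is trimmed
 * to the minimum.
 */
static inline int example_copy_to_page_frag(struct sock *sk,
					    struct msghdr *msg, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	/* Make sure the fragment has room; this may allocate a fresh page. */
	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	copy = min_t(int, copy, pfrag->size - pfrag->offset);
	if (copy_from_iter(page_address(pfrag->page) + pfrag->offset,
			   copy, &msg->msg_iter) != copy)
		return -EFAULT;

	pfrag->offset += copy;
	return copy;
}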
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)  *	Default write policy as shown to user space via poll/select/SIGIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) static inline bool sock_writeable(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) static inline gfp_t gfp_any(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	return noblock ? 0 : sk->sk_rcvtimeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	return noblock ? 0 : sk->sk_sndtimeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	return v ?: 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) /* Alas, socket operations with a timeout are not restartable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)  * Compare this to poll().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) static inline int sock_intr_errno(long timeo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) struct sock_skb_cb {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	u32 dropcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) /* Store sock_skb_cb at the end of skb->cb[] so protocol families
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)  * using skb->cb[] would keep using it directly and utilize its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)  * alignment guarantee.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) #define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 			    sizeof(struct sock_skb_cb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) #define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 			    SOCK_SKB_CB_OFFSET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) #define sock_skb_cb_check_size(size) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
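
/*
 * Editor's illustration (not part of the original header): a hedged sketch of
 * a protocol carving its own private area out of skb->cb[] while leaving the
 * trailing sock_skb_cb untouched.  struct example_proto_skb_cb and the
 * helpers below are hypothetical, not a real protocol's control block.
 */
struct example_proto_skb_cb {
	u32 seq;
	u32 flags;
};

#define EXAMPLE_PROTO_SKB_CB(__skb) \
	((struct example_proto_skb_cb *)&((__skb)->cb[0]))

static inline void example_proto_init_cb(struct sk_buff *skb)
{
	/* Fails the build if the private area would overlap sock_skb_cb. */
	sock_skb_cb_check_size(sizeof(struct example_proto_skb_cb));

	EXAMPLE_PROTO_SKB_CB(skb)->seq = 0;
	EXAMPLE_PROTO_SKB_CB(skb)->flags = 0;
}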
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 						atomic_read(&sk->sk_drops) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	atomic_add(segs, &sk->sk_drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) static inline ktime_t sock_read_timestamp(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) #if BITS_PER_LONG==32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	ktime_t kt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		seq = read_seqbegin(&sk->sk_stamp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		kt = sk->sk_stamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	} while (read_seqretry(&sk->sk_stamp_seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	return kt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	return READ_ONCE(sk->sk_stamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) #if BITS_PER_LONG==32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	write_seqlock(&sk->sk_stamp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	sk->sk_stamp = kt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	write_sequnlock(&sk->sk_stamp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	WRITE_ONCE(sk->sk_stamp, kt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 			   struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 			     struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	ktime_t kt = skb->tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	 * generate control messages if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	 * - receive time stamping in software requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	 * - software time stamp available and wanted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	 * - hardware time stamps available and wanted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	    (hwtstamps->hwtstamp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 		__sock_recv_timestamp(msg, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 		sock_write_timestamp(sk, kt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 		__sock_recv_wifi_status(msg, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 			      struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) #define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 					  struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 			   (1UL << SOCK_RCVTSTAMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) #define TSFLAGS_ANY	  (SOF_TIMESTAMPING_SOFTWARE			| \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 			   SOF_TIMESTAMPING_RAW_HARDWARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 		__sock_recv_ts_and_drops(msg, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 		sock_write_timestamp(sk, skb->tstamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		sock_write_timestamp(sk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)  * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)  * @sk:		socket sending this packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)  * @tsflags:	timestamping flags to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)  * @tx_flags:	completed with instructions for time stamping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)  * @tskey:      filled in with next sk_tskey (not for TCP, which uses seqno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)  * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 				      __u8 *tx_flags, __u32 *tskey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	if (unlikely(tsflags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 		__sock_tx_timestamp(tsflags, tx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		    tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 			*tskey = sk->sk_tskey++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		*tx_flags |= SKBTX_WIFI_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 				     __u8 *tx_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	_sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	_sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 			   &skb_shinfo(skb)->tskey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
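
/*
 * Editor's illustration (not part of the original header): a hedged sketch of
 * a datagram transmit path applying the timestamping helpers above to a
 * freshly built skb.  example_finish_tx_skb() and the use of sk->sk_tsflags
 * as the flag source are illustrative assumptions.
 */
static inline void example_finish_tx_skb(struct sock *sk, struct sk_buff *skb)
{
	__u8 tx_flags = 0;

	/* Translate the socket's timestamp requests into skb tx_flags. */
	sock_tx_timestamp(sk, sk->sk_tsflags, &tx_flags);
	skb_shinfo(skb)->tx_flags = tx_flags;
}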
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)  * sk_eat_skb - Release a skb if it is no longer needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)  * @sk: socket to eat this skb from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)  * @skb: socket buffer to eat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)  * This routine must be called with interrupts disabled or with the socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)  * locked so that the sk_buff queue operation is ok.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	__skb_unlink(skb, &sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	    !sk->sk_rx_skb_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		sk->sk_rx_skb_cache = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		skb_orphan(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	__kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) }
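
/*
 * Editor's illustration (not part of the original header): a hedged sketch of
 * a stream-style loop consuming fully copied skbs with sk_eat_skb().  As the
 * comment above requires, the caller is assumed to hold the socket lock;
 * example_drain_queue() is hypothetical.
 */
static inline void example_drain_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		/* ... copy skb data to the user here ... */
		sk_eat_skb(sk, skb);
	}
}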
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) struct net *sock_net(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	return read_pnet(&sk->sk_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) void sock_net_set(struct sock *sk, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	write_pnet(&sk->sk_net, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) skb_sk_is_prefetched(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) #ifdef CONFIG_INET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	return skb->destructor == sock_pfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) #endif /* CONFIG_INET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) /* This helper checks if a socket is a full socket,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)  * i.e. _not_ a timewait or request socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) static inline bool sk_fullsock(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) sk_is_refcounted(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	/* Only full sockets have sk->sk_flags. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)  * skb_steal_sock - steal a socket from an sk_buff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)  * @skb: sk_buff to steal the socket from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)  * @refcounted: is set to true if the socket is reference-counted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) static inline struct sock *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) skb_steal_sock(struct sk_buff *skb, bool *refcounted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	if (skb->sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		struct sock *sk = skb->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		*refcounted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 		if (skb_sk_is_prefetched(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 			*refcounted = sk_is_refcounted(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 		skb->destructor = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		skb->sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	*refcounted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) }
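
/*
 * Editor's illustration (not part of the original header): a hedged sketch of
 * an input path consuming a socket prefetched by early demux via
 * skb_steal_sock(), dropping the reference only when @refcounted reports one
 * was taken.  example_input() and its deliver callback are hypothetical.
 */
static inline int example_input(struct sk_buff *skb,
				int (*deliver)(struct sock *sk,
					       struct sk_buff *skb))
{
	bool refcounted;
	struct sock *sk = skb_steal_sock(skb, &refcounted);
	int ret = -ENOENT;

	if (sk) {
		ret = deliver(sk, skb);
		if (refcounted)
			sock_put(sk);
	}
	return ret;
}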
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) /* Checks if this SKB belongs to an HW offloaded socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)  * and whether any SW fallbacks are required based on dev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)  * Check decrypted mark in case skb_orphan() cleared socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 						   struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) #ifdef CONFIG_SOCK_VALIDATE_XMIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	struct sock *sk = skb->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) #ifdef CONFIG_TLS_DEVICE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	} else if (unlikely(skb->decrypted)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 		skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) /* This helper checks if a socket is a LISTEN or NEW_SYN_RECV socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)  * SYNACK messages can be attached to either one (depending on SYNCOOKIE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) static inline bool sk_listener(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 		       int type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) bool sk_ns_capable(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 		   struct user_namespace *user_ns, int cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) bool sk_capable(const struct sock *sk, int cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) bool sk_net_capable(const struct sock *sk, int cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) /* Take into consideration the size of the struct sk_buff overhead in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)  * determination of these values, since that is non-constant across
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)  * platforms.  This makes socket queueing behavior and performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)  * not depend upon such differences.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) #define _SK_MEM_PACKETS		256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) #define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) #define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) #define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) extern __u32 sysctl_wmem_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) extern __u32 sysctl_rmem_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) extern int sysctl_tstamp_allow_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) extern int sysctl_optmem_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) extern __u32 sysctl_wmem_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) extern __u32 sysctl_rmem_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) #define SKB_FRAG_PAGE_ORDER	get_order(32768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	/* Does this proto have per netns sysctl_wmem? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	if (proto->sysctl_wmem_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	return *proto->sysctl_wmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	/* Does this proto have per netns sysctl_rmem? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	if (proto->sysctl_rmem_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 		return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	return *proto->sysctl_rmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) /* Default TCP Small Queues budget is ~1 ms of data (1 sec >> 10).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)  * Some wifi drivers need to tweak it to get more chunks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)  * They can use this helper from their ndo_start_xmit().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) static inline void sk_pacing_shift_update(struct sock *sk, int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	WRITE_ONCE(sk->sk_pacing_shift, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
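
/*
 * Editor's illustration (not part of the original header): a hedged sketch of
 * a wifi-style ndo_start_xmit() using sk_pacing_shift_update() to allow
 * larger TSQ bursts, as the comment above suggests.  example_start_xmit() and
 * the shift value 8 (~4 ms of queued data) are illustrative assumptions.
 */
static inline netdev_tx_t example_start_xmit(struct sk_buff *skb,
					     struct net_device *dev)
{
	/* Ask TCP Small Queues for roughly 4x the default in-flight budget. */
	sk_pacing_shift_update(skb->sk, 8);

	/* ... hand the skb to the hardware queue here ... */
	return NETDEV_TX_OK;
}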
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) /* if a socket is bound to a device, check that the given device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)  * index is either the same or that the socket is bound to an L3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)  * master device and the given device index is also enslaved to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)  * that L3 master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	int mdif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	if (mdif && mdif == sk->sk_bound_dev_if)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) void sock_def_readable(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) void sock_enable_timestamps(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) void sock_no_linger(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) void sock_set_keepalive(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) void sock_set_priority(struct sock *sk, u32 priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) void sock_set_rcvbuf(struct sock *sk, int val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) void sock_set_mark(struct sock *sk, u32 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) void sock_set_reuseaddr(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) void sock_set_reuseport(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) void sock_set_sndtimeo(struct sock *sk, s64 secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) #endif	/* _SOCK_H */