/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	unsigned int	obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

struct saved_syn {
	u32 mac_hdrlen;
	u32 network_hdrlen;
	u32 tcp_hdrlen;
	u8 data[];
};
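
/* Usage note (illustrative): data[] carries the headers of the SYN that
 * created the request, laid out back to back, so the blob spans
 * mac_hdrlen + network_hdrlen + tcp_hdrlen bytes. A consumer that only
 * needs the TCP header could, for example, do:
 *
 *	const u8 *th = saved->data + saved->mac_hdrlen + saved->network_hdrlen;
 *
 * This storage is what backs the TCP_SAVE_SYN / TCP_SAVED_SYN socket
 * options.
 */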

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				syncookie:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	struct saved_syn		*saved_syn;
	u32				secid;
	u32				peer_secid;
};

static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}

static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}
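
/* Typical allocation pattern (illustrative, variable names are examples):
 * a protocol's connection-request path allocates against its own
 * request_sock_ops and only pins the listener when it is not answering
 * with a syncookie:
 *
 *	struct request_sock *req;
 *
 *	req = reqsk_alloc(rsk_ops, listener_sk, !want_cookie);
 *	if (!req)
 *		goto drop;
 *
 * The refcount starts at 0 here; callers that publish the request (e.g.
 * by hashing it into a lookup table) raise it before doing so.
 */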

static inline void __reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
	__reqsk_free(req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (refcount_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}
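
/* Lifetime summary: reqsk_free() is only legal while nobody else holds a
 * reference (refcount still 0), e.g. on an error path right after
 * reqsk_alloc(). Once the request has been published and its refcount
 * raised, owners must drop their reference with reqsk_put() instead, and
 * the final put performs the actual free.
 */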

/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields can be made as part of "listen_sock"
 *	structure above. But there is some implementation difficulty due to
 *	listen_sock being part of request_sock_queue hence will be freed when
 *	a listener is stopped. But TFO related fields may continue to be
 *	accessed even after a listener is closed, until its sk_refcnt drops
 *	to 0 implying no more outstanding TFO reqs. One solution is to keep
 *	listen_opt around until sk_refcnt drops to 0. But there is some other
 *	complexity that needs to be resolved. E.g., a listener can be disabled
 *	temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return READ_ONCE(queue->rskq_accept_head) == NULL;
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}
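
/* Illustrative accept-path sketch: a listener dequeues one established
 * child and takes over the child socket stored in req->sk, for example:
 *
 *	req = reqsk_queue_remove(queue, listener_sk);
 *	if (req) {
 *		newsk = req->sk;
 *		...
 *		reqsk_put(req);
 *	}
 *
 * (Sketch only; a real accept path also handles TFO bookkeeping and
 * waiting when the queue is empty.)
 */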

static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}
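
/* Accounting note: qlen counts every pending request, while young counts
 * only those whose SYNACK has not been retransmitted yet (num_timeout is
 * still 0). Listener-side SYN backlog pressure checks compare these
 * counters against the backlog limit when deciding whether to drop new
 * SYNs or fall back to syncookies.
 */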

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}

#endif /* _REQUEST_SOCK_H */