/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SOCK_REUSEPORT_H
#define _SOCK_REUSEPORT_H

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <net/sock.h>

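/* Serializes updates to reuseport groups; readers use RCU. */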
extern spinlock_t reuseport_lock;

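/*
 * A reuseport group is shared, via sk->sk_reuseport_cb, by all sockets
 * bound with SO_REUSEPORT to the same address and port.  socks[] holds
 * the current members; an attached BPF program, if any, can pick the
 * destination socket on receive.
 */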
struct sock_reuseport {
	struct rcu_head		rcu;

	u16			max_socks;	/* length of socks */
	u16			num_socks;	/* elements in socks */
	/* The last synq overflow event timestamp of this
	 * reuse->socks[] group.
	 */
	unsigned int		synq_overflow_ts;
	/* ID stays the same even after the size of socks[] grows. */
	unsigned int		reuseport_id;
	unsigned int		bind_inany:1;
	unsigned int		has_conns:1;
	struct bpf_prog __rcu	*prog;		/* optional BPF sock selector */
	struct sock		*socks[];	/* array of sock pointers */
};

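/*
 * reuseport_alloc() creates the group when the first socket binds,
 * reuseport_add_sock() attaches further sockets to it,
 * reuseport_select_sock() picks a member at receive demux time (via the
 * attached BPF program when present, otherwise by @hash), and
 * reuseport_detach_sock() removes a closing socket from socks[].
 * reuseport_attach_prog()/reuseport_detach_prog() install or remove the
 * optional selection program.
 */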
extern int reuseport_alloc(struct sock *sk, bool bind_inany);
extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
			      bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
					  u32 hash,
					  struct sk_buff *skb,
					  int hdr_len);
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);

/*
 * Once any socket in the group has connected, a blind
 * reuseport_select_sock() pick can steal packets from a connected socket,
 * so callers record that state here: @set marks the group, and the return
 * value tells the caller whether connected sockets may exist in it.
 */
static inline bool reuseport_has_conns(struct sock *sk, bool set)
{
	struct sock_reuseport *reuse;
	bool ret = false;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (reuse) {
		if (set)
			reuse->has_conns = 1;
		ret = reuse->has_conns;
	}
	rcu_read_unlock();

	return ret;
}
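
/*
 * Usage sketch (simplified from the UDP lookup pattern; the callers and
 * the udp_ehashfn() helper below live outside this header): a connect()
 * path marks the group with reuseport_has_conns(sk, true), and the
 * receive-side lookup only trusts a hashed pick while no member of the
 * group is connected:
 *
 *	hash = udp_ehashfn(net, daddr, hnum, saddr, sport);
 *	sk2 = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr));
 *	if (sk2 && !reuseport_has_conns(sk, false))
 *		return sk2;
 */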

#endif /* _SOCK_REUSEPORT_H */