/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/rhashtable-types.h>
#include <linux/completion.h>

/* Per netns frag queues directory */
struct fqdir {
	/* sysctls */
	long			high_thresh;
	long			low_thresh;
	int			timeout;
	int			max_dist;
	struct inet_frags	*f;
	struct net		*net;
	bool			dead;

	struct rhashtable	rhashtable ____cacheline_aligned_in_smp;

	/* Keep atomic mem on separate cachelines in structs that include it */
	atomic_long_t		mem ____cacheline_aligned_in_smp;
	struct work_struct	destroy_work;
};
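
/*
 * high_thresh/low_thresh bound the memory charged to this fqdir;
 * inet_frag_find() refuses to create new queues once usage crosses
 * high_thresh (or once fqdir_pre_exit() has zeroed it). A minimal
 * sketch of that admission check, after inet_frag_find():
 *
 *	long high_thresh = READ_ONCE(fqdir->high_thresh);
 *
 *	if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
 *		return NULL;	(refuse to create new reassembly state)
 */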

/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 * @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
	INET_FRAG_HASH_DEAD	= BIT(3),
};
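
/*
 * Protocols set these bits as fragments arrive. In essence, as
 * ip_frag_queue() does for IPv4 (variable names illustrative):
 *
 *	if (offset == 0)
 *		q->flags |= INET_FRAG_FIRST_IN;
 *	if (no_more_fragments) {	(IPv4: !(frag_off & IP_MF))
 *		q->flags |= INET_FRAG_LAST_IN;
 *		q->len = end;
 *	}
 */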

struct frag_v4_compare_key {
	__be32		saddr;
	__be32		daddr;
	u32		user;
	u32		vif;
	__be16		id;
	u16		protocol;
};

struct frag_v6_compare_key {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	u32		user;
	__be32		id;
	u32		iif;
};
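
/*
 * Lookup keys are filled from the packet headers before calling
 * inet_frag_find(); for IPv4, roughly as ip_find() does (user and vif
 * are caller-supplied context):
 *
 *	struct frag_v4_compare_key key = {
 *		.saddr		= iph->saddr,
 *		.daddr		= iph->daddr,
 *		.user		= user,
 *		.vif		= vif,
 *		.id		= iph->id,
 *		.protocol	= iph->protocol,
 *	};
 */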

/**
 * struct inet_frag_queue - fragment queue
 *
 * @node: rhash node
 * @key: keys identifying this frag.
 * @timer: queue expiration timer
 * @lock: spinlock protecting this frag
 * @refcnt: reference count of the queue
 * @rb_fragments: received fragments rb-tree root
 * @fragments_tail: received fragments tail
 * @last_run_head: the head of the last "run"; see ip_fragment.c
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @fqdir: pointer to struct fqdir
 * @rcu: rcu head for deferred freeing
 */
struct inet_frag_queue {
	struct rhash_head	node;
	union {
		struct frag_v4_compare_key v4;
		struct frag_v6_compare_key v6;
	} key;
	struct timer_list	timer;
	spinlock_t		lock;
	refcount_t		refcnt;
	struct rb_root		rb_fragments;
	struct sk_buff		*fragments_tail;
	struct sk_buff		*last_run_head;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;
	u16			max_size;
	struct fqdir		*fqdir;
	struct rcu_head		rcu;
};
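
/*
 * @meat accumulates the payload received so far, while @len is the full
 * datagram size learned from the final fragment, so the readiness test
 * (as in ip_frag_queue()) is:
 *
 *	q->meat += skb->len;	(per queued fragment)
 *	...
 *	if (q->flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len)
 *		... all fragments are in, reassemble ...
 */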

struct inet_frags {
	unsigned int		qsize;

	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*frag_expire)(struct timer_list *t);
	struct kmem_cache	*frags_cachep;
	const char		*frags_cache_name;
	struct rhashtable_params rhash_params;
	refcount_t		refcnt;
	struct completion	completion;
};
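
/*
 * Each protocol fills in one static struct inet_frags and registers it
 * once at boot; a sketch following ipfrag_init() in
 * net/ipv4/ip_fragment.c:
 *
 *	ip4_frags.constructor	   = ip4_frag_init;
 *	ip4_frags.destructor	   = ip4_frag_free;
 *	ip4_frags.qsize		   = sizeof(struct ipq);
 *	ip4_frags.frag_expire	   = ip_expire;
 *	ip4_frags.frags_cache_name = ip_frag_cache_name;
 *	ip4_frags.rhash_params	   = ip4_rhash_params;
 *	if (inet_frags_init(&ip4_frags))
 *		panic("IP: failed to allocate ip4_frags cache\n");
 */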

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);

static inline void fqdir_pre_exit(struct fqdir *fqdir)
{
	/* Prevent creation of new frags.
	 * Pairs with READ_ONCE() in inet_frag_find().
	 */
	WRITE_ONCE(fqdir->high_thresh, 0);

	/* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
	 * and ip6frag_expire_frag_queue().
	 */
	WRITE_ONCE(fqdir->dead, true);
}
void fqdir_exit(struct fqdir *fqdir);
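
/*
 * Teardown is two-phase: fqdir_pre_exit() runs from the pernet .pre_exit
 * hook so no new queues appear, then fqdir_exit() frees everything from
 * .exit. Typical wiring, as in the IPv4 pernet ops:
 *
 *	static void __net_exit ipv4_frags_pre_exit_net(struct net *net)
 *	{
 *		fqdir_pre_exit(net->ipv4.fqdir);
 *	}
 *
 *	static void __net_exit ipv4_frags_exit_net(struct net *net)
 *	{
 *		fqdir_exit(net->ipv4.fqdir);
 *	}
 */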

void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);

/* Free all skbs in the queue; return the sum of their truesizes. */
unsigned int inet_frag_rbtree_purge(struct rb_root *root);

static inline void inet_frag_put(struct inet_frag_queue *q)
{
	if (refcount_dec_and_test(&q->refcnt))
		inet_frag_destroy(q);
}
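
/*
 * inet_frag_find() returns the queue with its refcount elevated; drop it
 * with inet_frag_put() when done. Minimal usage sketch:
 *
 *	q = inet_frag_find(fqdir, &key);
 *	if (q) {
 *		spin_lock(&q->lock);
 *		... queue the fragment, maybe reassemble ...
 *		spin_unlock(&q->lock);
 *		inet_frag_put(q);
 *	}
 */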

/* Memory Tracking Functions. */

static inline long frag_mem_limit(const struct fqdir *fqdir)
{
	return atomic_long_read(&fqdir->mem);
}

static inline void sub_frag_mem_limit(struct fqdir *fqdir, long val)
{
	atomic_long_sub(val, &fqdir->mem);
}

static inline void add_frag_mem_limit(struct fqdir *fqdir, long val)
{
	atomic_long_add(val, &fqdir->mem);
}
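
/*
 * Accounting convention: charge skb->truesize when a fragment is queued
 * and release the total returned by inet_frag_rbtree_purge() (plus any
 * per-queue overhead) at teardown, e.g.:
 *
 *	add_frag_mem_limit(q->fqdir, skb->truesize);	(on insert)
 *	...
 *	sum = inet_frag_rbtree_purge(&q->rb_fragments);
 *	sub_frag_mem_limit(q->fqdir, sum);		(on destroy)
 */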

/* RFC 3168 support:
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
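
/*
 * Each fragment ORs its mapped ECN bit into the queue's accumulator, and
 * reassembly consults the table, where 0xff marks an invalid mix
 * (cf. ip_frag_reasm()):
 *
 *	qp->ecn |= ip4_frag_ecn(iph->tos);	(per fragment)
 *	...
 *	ecn = ip_frag_ecn_table[qp->ecn];	(at reassembly)
 *	if (unlikely(ecn == 0xff))
 *		goto out_fail;			(drop the queue)
 */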

/* Return values of inet_frag_queue_insert() */
#define IPFRAG_OK	0
#define IPFRAG_DUP	1
#define IPFRAG_OVERLAP	2
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end);
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce);
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);

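/*
 * Typical flow, modeled on ip_frag_queue()/ip_frag_reasm(): remember the
 * old tail, insert, then reassemble once the queue is complete (see the
 * readiness test near struct inet_frag_queue):
 *
 *	prev_tail = q->fragments_tail;
 *	err = inet_frag_queue_insert(q, skb, offset, end);
 *	if (err)
 *		goto insert_error;	(IPFRAG_DUP or IPFRAG_OVERLAP)
 *	...
 *	reasm_data = inet_frag_reasm_prepare(q, skb, prev_tail);
 *	if (!reasm_data)
 *		goto out_oom;
 *	inet_frag_reasm_finish(q, skb, reasm_data, try_coalesce);
 */
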
#endif