// SPDX-License-Identifier: GPL-2.0-or-later
/* 6LoWPAN fragment reassembly
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/ipv6/reassembly.c
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6_frag.h>
#include <net/inet_frag.h>
#include <net/ip.h>

#include "6lowpan_i.h"

static const char lowpan_frags_cache_name[] = "lowpan-frags";

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev, struct net_device *ldev);

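/* inet_frags constructor: copy the lookup key (datagram tag and size plus
 * the source and destination link-layer addresses) into the freshly
 * allocated queue so later fragments can find it again.
 */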
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
	const struct frag_lowpan_compare_key *key = a;

	BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
	memcpy(&q->key, key, sizeof(*key));
}

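/* Per-queue timer callback: a queue that is still incomplete when the
 * reassembly timeout fires is killed (unhashed), and the reference held
 * by the timer is dropped, which eventually frees the queued fragments.
 */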
static void lowpan_frag_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q);
}

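/* Find or create the reassembly queue for this fragment. Queues are keyed
 * on the datagram tag and size from the fragmentation header plus the
 * IEEE 802.15.4 source and destination addresses, so fragments of
 * different datagrams cannot mix.
 */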
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_802154_cb *cb,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);
	struct frag_lowpan_compare_key key = {};
	struct inet_frag_queue *q;

	key.tag = cb->d_tag;
	key.d_size = cb->d_size;
	key.src = *src;
	key.dst = *dst;

	q = inet_frag_find(ieee802154_lowpan->fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct lowpan_frag_queue, q);
}

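/* Queue one fragment. Returns 1 if this fragment completed the datagram
 * and @skb now carries the reassembled packet, -1 if the fragment was
 * consumed (queued for later or dropped on error). Called with
 * fq->q.lock held.
 */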
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, u8 frag_type)
{
	struct sk_buff *prev_tail;
	struct net_device *ldev;
	int end, offset, err;

	/* inet_frag_queue_* functions use skb->cb; see struct ipfrag_skb_cb
	 * in inet_fragment.c
	 */
	BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet_skb_parm));
	BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet6_skb_parm));

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	offset = lowpan_802154_cb(skb)->d_offset << 3;
	end = lowpan_802154_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already hold data beyond this end, or a previous
		 * final fragment reported a different end, the datagram is
		 * corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	ldev = skb->dev;
	if (ldev)
		skb->dev = NULL;
	barrier();

	prev_tail = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err)
		goto err;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1)
		fq->q.flags |= INET_FRAG_FIRST_IN;

	fq->q.meat += skb->len;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, skb, prev_tail, ldev);
		skb->_skb_refdst = orefdst;
		return res;
	}
	skb_dst_drop(skb);

	return -1;
err:
	kfree_skb(skb);
	return -1;
}

/* Reassemble the complete datagram.
 *
 * Called with fq locked; the caller must already have checked that the
 * queue is eligible for reassembly, i.e. it is not COMPLETE, both the
 * first and the last fragments have arrived, and all the bytes in
 * between are present.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *ldev)
{
	void *reasm_data;

	inet_frag_kill(&fq->q);

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto out_oom;
	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);

	skb->dev = ldev;
	skb->tstamp = fq->q.stamp;
	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}

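/* Map a handler chain result onto the NET_RX_* codes expected by the
 * caller: RX_QUEUED means the fragment was accepted, anything else
 * (including an unknown dispatch value) is treated as a drop.
 */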
static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
					  lowpan_rx_result res)
{
	switch (res) {
	case RX_QUEUED:
		return NET_RX_SUCCESS;
	case RX_CONTINUE:
		/* nobody cared about this packet */
		net_warn_ratelimited("%s: received unknown dispatch\n",
				     __func__);

		fallthrough;
	default:
		/* all other results are treated as failures */
		return NET_RX_DROP;
	}
}

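/* If the FRAG1 payload starts with an IPHC dispatch, decompress the
 * header before the fragment is queued; otherwise return RX_CONTINUE so
 * the next handler (plain IPv6 dispatch) can look at it.
 */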
static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
{
	int ret;

	if (!lowpan_is_iphc(*skb_network_header(skb)))
		return RX_CONTINUE;

	ret = lowpan_iphc_decompress(skb);
	if (ret < 0)
		return RX_DROP;

	return RX_QUEUED;
}

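/* Run the FRAG1 payload through the known dispatch handlers (IPHC first,
 * then uncompressed IPv6) and translate the outcome into NET_RX_*.
 */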
static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
{
	lowpan_rx_result res;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(skb);		\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	/* likely at first */
	CALL_RXH(lowpan_frag_rx_h_iphc);
	CALL_RXH(lowpan_rx_h_ipv6);

rxh_next:
	return lowpan_frag_rx_handlers_result(skb, res);
#undef CALL_RXH
}

#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK	0x07
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT	8

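/* Parse the 6LoWPAN fragmentation header into the control block: the
 * 11-bit datagram size and 16-bit tag are common to FRAG1 and FRAGN;
 * FRAGN additionally carries the 8-bit datagram offset (in units of
 * eight octets), while for FRAG1 the offset is implicitly zero and a few
 * basic sanity checks are applied.
 */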
static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
			 struct lowpan_802154_cb *cb)
{
	bool fail;
	u8 high = 0, low = 0;
	__be16 d_tag = 0;

	fail = lowpan_fetch_skb(skb, &high, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	/* mask off the dispatch bits and use the remaining three bits as
	 * the high part of the 11-bit datagram size
	 */
	cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
		LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
	cb->d_tag = ntohs(d_tag);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		cb->d_offset = 0;
		/* the datagram size announced in a FRAG1 must at least
		 * cover an IPv6 header
		 */
		fail |= cb->d_size < sizeof(struct ipv6hdr);
		/* check if we can dereference the dispatch value */
		fail |= !skb->len;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}

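/* Receive one 6LoWPAN fragment (FRAG1 or FRAGN).
 *
 * The fragmentation header is parsed into the skb control block; for a
 * FRAG1 the payload is additionally run through the rx handlers so an
 * IPHC-compressed header is expanded before it is queued. Datagrams
 * larger than IPV6_MIN_MTU are rejected. Returns 1 when this fragment
 * completed the datagram and @skb now holds the reassembled packet,
 * otherwise -1 (fragment queued or dropped).
 */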
int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
	struct ieee802154_hdr hdr = {};
	int err;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		goto err;

	err = lowpan_get_cb(skb, frag_type, cb);
	if (err < 0)
		goto err;

	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		err = lowpan_invoke_frag_rx_handlers(skb);
		if (err == NET_RX_DROP)
			goto err;
	}

	if (cb->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, cb, &hdr.source, &hdr.dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}

#ifdef CONFIG_SYSCTL

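/* Per-netns sysctls under net/ieee802154/6lowpan: memory thresholds for
 * queued fragments and the reassembly timeout. The data pointers are
 * wired up to the namespace's fqdir at registration time.
 */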
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname = "6lowpanfrag_high_thresh",
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = proc_doulongvec_minmax,
	},
	{
		.procname = "6lowpanfrag_low_thresh",
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = proc_doulongvec_minmax,
	},
	{
		.procname = "6lowpanfrag_time",
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname = "6lowpanfrag_secret_interval",
		.data = &lowpan_frags_secret_interval_unused,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{ }
};

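/* Register the per-netns sysctl table. Namespaces other than init_net get
 * their own copy of the template so each can point at its own fqdir; for
 * namespaces owned by an unprivileged user namespace the table is
 * registered empty.
 */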
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	table[0].data = &ieee802154_lowpan->fqdir->high_thresh;
	table[0].extra1 = &ieee802154_lowpan->fqdir->low_thresh;
	table[1].data = &ieee802154_lowpan->fqdir->low_thresh;
	table[1].extra2 = &ieee802154_lowpan->fqdir->high_thresh;
	table[2].data = &ieee802154_lowpan->fqdir->timeout;

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int __init lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int __init lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

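/* Per-netns setup: allocate the fqdir, seed it with the IPv6 defaults for
 * the thresholds and the timeout, then register the sysctls. If sysctl
 * registration fails, the fqdir is torn down again.
 */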
static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);
	int res;

	res = fqdir_init(&ieee802154_lowpan->fqdir, &lowpan_frags, net);
	if (res < 0)
		return res;

	ieee802154_lowpan->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = lowpan_frags_ns_sysctl_register(net);
	if (res < 0)
		fqdir_exit(ieee802154_lowpan->fqdir);
	return res;
}

static void __net_exit lowpan_frags_pre_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	fqdir_pre_exit(ieee802154_lowpan->fqdir);
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	fqdir_exit(ieee802154_lowpan->fqdir);
}

static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.pre_exit = lowpan_frags_pre_exit_net,
	.exit = lowpan_frags_exit_net,
};

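/* rhashtable callbacks: lookup keys and stored queues both hash the full
 * frag_lowpan_compare_key, and comparison is a plain memcmp of that key.
 */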
static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
}

static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key,
		      sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
}

static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_lowpan_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}

static const struct rhashtable_params lowpan_rhash_params = {
	.head_offset = offsetof(struct inet_frag_queue, node),
	.hashfn = lowpan_key_hashfn,
	.obj_hashfn = lowpan_obj_hashfn,
	.obj_cmpfn = lowpan_obj_cmpfn,
	.automatic_shrinking = true,
};

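/* Module init/exit: set up the inet_frags callbacks, then register the
 * global sysctl table and the pernet operations, unwinding in reverse
 * order on failure.
 */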
int __init lowpan_net_frag_init(void)
{
	int ret;

	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
	lowpan_frags.rhash_params = lowpan_rhash_params;
	ret = inet_frags_init(&lowpan_frags);
	if (ret)
		goto out;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;
out:
	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
err_sysctl:
	inet_frags_fini(&lowpan_frags);
	return ret;
}

void lowpan_net_frag_exit(void)
{
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
	inet_frags_fini(&lowpan_frags);
}