Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

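The file shown below is the kernel's shared IPv6 fragment-reassembly helper header (include/net/ipv6_frag.h upstream); it is used by both the IPv6 reassembly path and the netfilter conntrack defragmenter.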
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_FRAG_H
#define _IPV6_FRAG_H
#include <linux/kernel.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

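/*
 * Each defragmentation "user" below is part of the fragment lookup key,
 * so fragments handled in different contexts never share a reassembly
 * queue.  The conntrack users each reserve a USHRT_MAX-wide range so
 * that the conntrack zone id can be added on top of the base value.
 */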
enum ip6_defrag_users {
	IP6_DEFRAG_LOCAL_DELIVER,
	IP6_DEFRAG_CONNTRACK_IN,
	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_OUT,
	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

/*
 *	Equivalent of ipv4 struct ip
 */
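/*
 * q embeds the generic inet_frag_queue state.  iif records the incoming
 * interface index (used to look the device up again when the queue
 * expires), nhoffset is used by the reassembly code to patch the Next
 * Header byte once the fragments are joined, and ecn accumulates the
 * ECN state seen on the individual fragments.
 */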
struct frag_queue {
	struct inet_frag_queue	q;

	int			iif;
	__u16			nhoffset;
	u8			ecn;
};

#if IS_ENABLED(CONFIG_IPV6)
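/*
 * Constructor callback for a freshly allocated fragment queue: the
 * inet_frag machinery invokes this with the frag_v6_compare_key the
 * lookup was performed with, so the key is copied into the queue here.
 */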
static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	const struct frag_v6_compare_key *key = a;

	q->key.v6 = *key;
	fq->ecn = 0;
}

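/*
 * The three helpers below hash a lookup key, hash a queued entry by its
 * embedded key, and compare a key against a queued entry.  They are
 * meant to be wired into the rhashtable backing the fragment queues; a
 * minimal sketch of that wiring, mirroring what net/ipv6/reassembly.c
 * does upstream (field values here are illustrative, not verified
 * against this tree):
 *
 *	const struct rhashtable_params ip6_rhash_params = {
 *		.head_offset		= offsetof(struct inet_frag_queue, node),
 *		.hashfn			= ip6frag_key_hashfn,
 *		.obj_hashfn		= ip6frag_obj_hashfn,
 *		.obj_cmpfn		= ip6frag_obj_cmpfn,
 *		.automatic_shrinking	= true,
 *	};
 */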
static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}

static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v6,
		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}

static inline int
ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v6_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}

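/*
 * Shared expiry path, called from the per-protocol frag_expire timer
 * callbacks: unhash the queue, bump the reassembly-failure counters and,
 * if the first fragment did arrive, return an ICMPv6 "fragment
 * reassembly time exceeded" error to the sender.
 */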
static inline void
ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
	struct net_device *dev = NULL;
	struct sk_buff *head;

	rcu_read_lock();
	/* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
	if (READ_ONCE(fq->q.fqdir->dead))
		goto out_rcu_unlock;
	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q);

	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out;

	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	head = inet_frag_pull_head(&fq->q);
	if (!head)
		goto out;

	head->dev = dev;
	spin_unlock(&fq->q.lock);

	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
	kfree_skb(head);
	goto out_rcu_unlock;

out:
	spin_unlock(&fq->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	inet_frag_put(&fq->q);
}

/* Check if the upper layer header is truncated in the first fragment. */
static inline bool
ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp)
{
	u8 nexthdr = *nexthdrp;
	__be16 frag_off;
	int offset;

	offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off);
	if (offset < 0 || (frag_off & htons(IP6_OFFSET)))
		return false;
	switch (nexthdr) {
	case NEXTHDR_TCP:
		offset += sizeof(struct tcphdr);
		break;
	case NEXTHDR_UDP:
		offset += sizeof(struct udphdr);
		break;
	case NEXTHDR_ICMP:
		offset += sizeof(struct icmp6hdr);
		break;
	default:
		offset += 1;
	}
	if (offset > skb->len)
		return true;
	return false;
}
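/*
 * A rough sketch of how a receive path is expected to use the check
 * above on a first fragment (fhdr/nexthdr are caller locals; offsets
 * and the drop handling are illustrative, not copied from this tree):
 *
 *	if (!(fhdr->frag_off & htons(IP6_OFFSET)) &&
 *	    ipv6frag_thdr_truncated(skb, skb_transport_offset(skb) +
 *				    sizeof(struct frag_hdr), &nexthdr)) {
 *		kfree_skb(skb);
 *		return -1;
 *	}
 */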

#endif
#endif