Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/in.h>
#include <uapi/linux/if.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/ipv6.h>
#include <uapi/linux/if_tunnel.h>
#include <uapi/linux/mpls.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
#define IP_MF		0x2000
#define IP_OFFSET	0x1FFF

#define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F

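/* Jump table for bpf_tail_call(); the PARSE_* slots below are expected to
 * be filled in by the user-space loader with the bpf_func_* programs
 * generated by the PROG() macro.
 */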
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
	__uint(max_entries, 8);
} jmp_table SEC(".maps");

#define PARSE_VLAN 1
#define PARSE_MPLS 2
#define PARSE_IP 3
#define PARSE_IPV6 4

/* Protocol dispatch routine. It tail-calls next BPF program depending
 * on eth proto. Note, we could have used ...
 *
 *   bpf_tail_call(skb, &jmp_table, proto);
 *
 * ... but it would need large prog_array and cannot be optimised given
 * the map key is not static.
 */
static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
{
	switch (proto) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
		bpf_tail_call(skb, &jmp_table, PARSE_VLAN);
		break;
	case ETH_P_MPLS_UC:
	case ETH_P_MPLS_MC:
		bpf_tail_call(skb, &jmp_table, PARSE_MPLS);
		break;
	case ETH_P_IP:
		bpf_tail_call(skb, &jmp_table, PARSE_IP);
		break;
	case ETH_P_IPV6:
		bpf_tail_call(skb, &jmp_table, PARSE_IPV6);
		break;
	}
}

struct vlan_hdr {
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

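/* Flow key shared by the IPv4 and IPv6 parsers; for IPv6 the src/dst
 * fields hold 32-bit hashes of the addresses (see ipv6_addr_hash()).
 */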
struct flow_key_record {
	__be32 src;
	__be32 dst;
	union {
		__be32 ports;
		__be16 port16[2];
	};
	__u32 ip_proto;
};

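/* Non-zero if the IPv4 header at 'nhoff' has the MF flag set or a
 * non-zero fragment offset, i.e. the packet is a fragment.
 */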
static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
{
	return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
		& (IP_MF | IP_OFFSET);
}

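/* Fold the 128-bit IPv6 address at 'off' into 32 bits by XOR-ing its four
 * 32-bit words, so it fits the __be32 src/dst fields of the flow key.
 */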
static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
{
	__u64 w0 = load_word(ctx, off);
	__u64 w1 = load_word(ctx, off + 4);
	__u64 w2 = load_word(ctx, off + 8);
	__u64 w3 = load_word(ctx, off + 12);

	return (__u32)(w0 ^ w1 ^ w2 ^ w3);
}

struct globals {
	struct flow_key_record flow;
};

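/* One scratch 'globals' slot per CPU, indexed by the processor id in
 * this_cpu_globals() below; max_entries bounds the number of CPUs served.
 */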
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, __u32);
	__type(value, struct globals);
	__uint(max_entries, 32);
} percpu_map SEC(".maps");

/* poor man's per-CPU storage until native per-CPU map support is ready */
static struct globals *this_cpu_globals(void)
{
	u32 key = bpf_get_smp_processor_id();

	return bpf_map_lookup_elem(&percpu_map, &key);
}

/* some simple stats for user space consumption */
struct pair {
	__u64 packets;
	__u64 bytes;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct flow_key_record);
	__type(value, struct pair);
	__uint(max_entries, 1024);
} hash_map SEC(".maps");

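/* Look up the current flow in hash_map and bump its packet/byte counters,
 * creating the entry on first use.
 */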
static void update_stats(struct __sk_buff *skb, struct globals *g)
{
	struct flow_key_record key = g->flow;
	struct pair *value;

	value = bpf_map_lookup_elem(&hash_map, &key);
	if (value) {
		__sync_fetch_and_add(&value->packets, 1);
		__sync_fetch_and_add(&value->bytes, skb->len);
	} else {
		struct pair val = {1, skb->len};

		bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
	}
}

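/* Handle the L4/encapsulation protocol found at offset skb->cb[0]:
 * GRE, IPIP and IPv6-in-IP re-enter parse_eth_proto() for the inner
 * header; TCP/UDP record the port pair and, together with ICMP,
 * finalize the flow key and update the stats.
 */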
static __always_inline void parse_ip_proto(struct __sk_buff *skb,
					   struct globals *g, __u32 ip_proto)
{
	__u32 nhoff = skb->cb[0];
	int poff;

	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		};

		__u32 gre_flags = load_half(skb,
					    nhoff + offsetof(struct gre_hdr, flags));
		__u32 gre_proto = load_half(skb,
					    nhoff + offsetof(struct gre_hdr, proto));

		if (gre_flags & (GRE_VERSION|GRE_ROUTING))
			break;

		nhoff += 4;
		if (gre_flags & GRE_CSUM)
			nhoff += 4;
		if (gre_flags & GRE_KEY)
			nhoff += 4;
		if (gre_flags & GRE_SEQ)
			nhoff += 4;

		skb->cb[0] = nhoff;
		parse_eth_proto(skb, gre_proto);
		break;
	}
	case IPPROTO_IPIP:
		parse_eth_proto(skb, ETH_P_IP);
		break;
	case IPPROTO_IPV6:
		parse_eth_proto(skb, ETH_P_IPV6);
		break;
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		g->flow.ports = load_word(skb, nhoff);
		/* fall through: record ip_proto and stats as for ICMP */
	case IPPROTO_ICMP:
		g->flow.ip_proto = ip_proto;
		update_stats(skb, g);
		break;
	default:
		break;
	}
}

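/* PARSE_IP: parse the IPv4 header at skb->cb[0], record src/dst (skipped
 * for GRE so the inner header can supply them), skip the IP header and
 * dispatch on the transport protocol. Fragments are not parsed further.
 */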
PROG(PARSE_IP)(struct __sk_buff *skb)
{
	struct globals *g = this_cpu_globals();
	__u32 nhoff, verlen, ip_proto;

	if (!g)
		return 0;

	nhoff = skb->cb[0];

	if (unlikely(ip_is_fragment(skb, nhoff)))
		return 0;

	ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));

	if (ip_proto != IPPROTO_GRE) {
		g->flow.src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
		g->flow.dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
	}

	verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
	nhoff += (verlen & 0xF) << 2;

	skb->cb[0] = nhoff;
	parse_ip_proto(skb, g, ip_proto);
	return 0;
}

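/* PARSE_IPV6: hash the IPv6 source/destination addresses into the flow
 * key, skip the fixed 40-byte header and dispatch on nexthdr (extension
 * headers are not walked).
 */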
PROG(PARSE_IPV6)(struct __sk_buff *skb)
{
	struct globals *g = this_cpu_globals();
	__u32 nhoff, ip_proto;

	if (!g)
		return 0;

	nhoff = skb->cb[0];

	ip_proto = load_byte(skb,
			     nhoff + offsetof(struct ipv6hdr, nexthdr));
	g->flow.src = ipv6_addr_hash(skb,
				     nhoff + offsetof(struct ipv6hdr, saddr));
	g->flow.dst = ipv6_addr_hash(skb,
				     nhoff + offsetof(struct ipv6hdr, daddr));
	nhoff += sizeof(struct ipv6hdr);

	skb->cb[0] = nhoff;
	parse_ip_proto(skb, g, ip_proto);
	return 0;
}

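/* PARSE_VLAN: skip one 802.1Q/802.1ad tag and re-dispatch on the
 * encapsulated EtherType.
 */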
PROG(PARSE_VLAN)(struct __sk_buff *skb)
{
	__u32 nhoff, proto;

	nhoff = skb->cb[0];

	proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
						h_vlan_encapsulated_proto));
	nhoff += sizeof(struct vlan_hdr);
	skb->cb[0] = nhoff;

	parse_eth_proto(skb, proto);

	return 0;
}

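/* PARSE_MPLS: pop one label per tail call; at the bottom of the stack,
 * peek at the IP version nibble to pick IPv4 vs IPv6.
 */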
PROG(PARSE_MPLS)(struct __sk_buff *skb)
{
	__u32 nhoff, label;

	nhoff = skb->cb[0];

	label = load_word(skb, nhoff);
	nhoff += sizeof(struct mpls_label);
	skb->cb[0] = nhoff;

	if (label & MPLS_LS_S_MASK) {
		/* bottom of stack: the IP version is the high nibble of the
		 * first payload byte (0x40 for IPv4, 0x60 for IPv6)
		 */
		__u8 verlen = load_byte(skb, nhoff);

		if ((verlen & 0xF0) == 0x40)
			parse_eth_proto(skb, ETH_P_IP);
		else
			parse_eth_proto(skb, ETH_P_IPV6);
	} else {
		parse_eth_proto(skb, ETH_P_MPLS_UC);
	}

	return 0;
}

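/* Entry point attached to the raw socket: record the initial network
 * header offset in skb->cb[0] and dispatch on the EtherType read from
 * the Ethernet header.
 */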
SEC("socket/0")
int main_prog(struct __sk_buff *skb)
{
	__u32 nhoff = ETH_HLEN;
	__u32 proto = load_half(skb, 12); /* offsetof(struct ethhdr, h_proto) */

	skb->cb[0] = nhoff;
	parse_eth_proto(skb, proto);
	return 0;
}

char _license[] SEC("license") = "GPL";