Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5/5B/5+ boards. The file below is net/sched/cls_bpf.c, the tc BPF-based traffic classifier.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

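/*
 * Per-classifier-instance state: all attached programs hang off @plist
 * (walked under RCU in the fast path), and @handle_idr allocates and
 * resolves the 32-bit filter handles.
 */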
struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

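/*
 * One attached program. Exactly one flavour is populated: classic BPF
 * keeps its instructions in @bpf_ops (see cls_bpf_prog_from_ops()),
 * while an eBPF program loaded by fd leaves @bpf_ops NULL (see
 * cls_bpf_prog_from_efd()); cls_bpf_is_ebpf() keys off that.
 */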
struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

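/*
 * Sanitize the verdict of a direct-action program: any value that is not
 * a known TC_ACT_* code is mapped to TC_ACT_UNSPEC, which makes
 * cls_bpf_classify() fall through to the next program on the list.
 */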
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

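/*
 * Fast path. On ingress the MAC header has already been pulled, so it is
 * pushed back before the program runs (keeping skb->data consistent for
 * the program) and pulled again afterwards. A return of -1 means no
 * program matched.
 */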
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

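/*
 * For illustration only (not part of the original file): a minimal
 * sketch of the kind of eBPF classifier the loop above runs, assuming
 * clang -O2 -target bpf and libbpf's bpf_helpers.h; interface, object
 * and function names are placeholders. In direct-action mode the return
 * value is a TC_ACT_* verdict, as filtered by cls_bpf_exec_opcode();
 * otherwise it is a classid, with 0 meaning "no match" and -1 selecting
 * the default classid from TCA_BPF_CLASSID.
 *
 *	#include <linux/bpf.h>
 *	#include <linux/pkt_cls.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("classifier")
 *	int tc_cls(struct __sk_buff *skb)
 *	{
 *		return TC_ACT_OK;	// accept in direct-action mode
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 *
 * Attached with something like:
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress bpf da obj cls.o sec classifier
 */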
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

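/*
 * Issue one hardware offload request. @prog and @oldprog select the
 * operation: both set means replace, only @prog means add, only @oldprog
 * means destroy. If an add/replace fails, the call is retried with the
 * arguments swapped to roll the hardware back to the old program.
 */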
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

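/*
 * Filter out the cases where hardware does not need to be touched:
 * SKIP_HW entries are dropped from the request, and a replace that would
 * change the SKIP_SW/SKIP_HW flags is rejected outright.
 */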
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

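/*
 * Deletion is deferred through tcf_queue_work() while the netns is still
 * reachable, so the final free happens after an RCU grace period (and
 * under RTNL, see below) and cannot race with readers in
 * cls_bpf_classify().
 */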
static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

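/*
 * Classic BPF setup: TCA_BPF_OPS carries an array of sock_filter insns
 * whose byte length must match TCA_BPF_OPS_LEN; bpf_prog_create() then
 * migrates (and possibly JITs) them into an internal bpf_prog.
 */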
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

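/*
 * eBPF setup: TCA_BPF_FD names an already-loaded BPF_PROG_TYPE_SCHED_CLS
 * program; the _dev variant of the lookup is used so that, with SKIP_SW,
 * a device-bound (hardware-offloaded) program is also accepted.
 */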
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

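/*
 * Common parameter handling for add/replace: exactly one of the classic
 * (TCA_BPF_OPS) and eBPF (TCA_BPF_FD) attribute sets must be present,
 * TCA_BPF_FLAG_ACT_DIRECT selects direct-action mode, and
 * TCA_BPF_FLAGS_GEN may carry the SKIP_HW/SKIP_SW offload flags.
 */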
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, true,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

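/*
 * Add or replace a filter. A handle of 0 means "allocate one"; on
 * replace the new program takes over the IDR slot and the RCU list
 * entry, and the old one is queued for deferred destruction.
 */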
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &prog->res, base);
		else
			__tcf_unbind_filter(q, &prog->res);
	}
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

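/*
 * Replay every program that is not SKIP_HW to a single offload callback,
 * e.g. when a driver (un)binds from the block after filters were already
 * installed.
 */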
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);