Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

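This tree's net/bpf/test_run.c implements the kernel side of the bpf(2)
BPF_PROG_TEST_RUN command: it feeds a caller-supplied packet or context
through a loaded BPF program, without attaching the program anywhere, and
reports the program's return value and average runtime back to userspace.
A minimal sketch of driving it, assuming a libbpf of the same era
("prog.o" and the zeroed test frame are placeholders, not part of this
tree):

/* Hedged sketch, not part of this tree: drive BPF_PROG_TEST_RUN via
 * libbpf. "prog.o" is assumed to contain one SCHED_CLS program; error
 * handling is abbreviated.
 */
#include <stdio.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	unsigned char pkt[64] = {};	/* >= ETH_HLEN, fits bpf_test_init() */
	__u32 retval, duration;
	struct bpf_object *obj;
	int prog_fd, err;

	obj = bpf_object__open_file("prog.o", NULL);
	if (libbpf_get_error(obj) || bpf_object__load(obj))
		return 1;
	prog_fd = bpf_program__fd(bpf_program__next(NULL, obj));

	/* One run; retval is the program's return code, duration is the
	 * average runtime in nanoseconds (see bpf_test_run() below).
	 */
	err = bpf_prog_test_run(prog_fd, 1, pkt, sizeof(pkt),
				NULL, NULL, &retval, &duration);
	if (!err)
		printf("retval=%u duration=%uns\n", retval, duration);
	bpf_object__close(obj);
	return err;
}

The kernel implementation follows.
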
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/error-injection.h>
#include <linux/smp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

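/*
 * bpf_test_run() - run @prog against @ctx @repeat times, reporting the
 * last return value in @retval and the average per-run time in @time
 * (nanoseconds, clamped to U32_MAX). cgroup storage is allocated up
 * front so programs that use it can run. The loop holds rcu_read_lock()
 * and migrate_disable(); both are dropped around cond_resched() when
 * need_resched() fires, and the clock is paused across that window so
 * only program runtime is counted. A pending signal ends the run early
 * with -EINTR.
 */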
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	migrate_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_cgroup_storage_set(storage);
		if (ret)
			break;

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		bpf_cgroup_storage_unset();

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			migrate_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			migrate_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	migrate_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp the copy if the user has provided a size hint, but copy
	 * the full buffer if not, to retain the old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. Functions with 7+
 * arguments can be supported in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

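/*
 * bpf_test_init() - copy the user packet into a fresh kernel buffer laid
 * out as [headroom | data | tailroom], capped so the whole allocation
 * fits in one page. The packet starts at data + headroom; returns the
 * allocation or an ERR_PTR().
 */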
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

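/*
 * Raw tracepoint test runs take their context as an array of up to
 * MAX_BPF_FUNC_ARGS u64 arguments and, uniquely among the runners here,
 * honour BPF_F_TEST_RUN_ON_CPU to execute on a chosen CPU. A hedged
 * userspace sketch (prog_fd and args are placeholders), assuming a
 * matching-era libbpf with bpf_prog_test_run_opts():
 *
 *	__u64 args[2] = { 1, 2 };
 *	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.ctx_in = args,
 *		.ctx_size_in = sizeof(args),
 *		.flags = BPF_F_TEST_RUN_ON_CPU,
 *		.cpu = 2,			// must be online, else -ENXIO
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on success, opts.retval holds the program's return value
 */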
struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = BPF_PROG_RUN(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!info.ctx)
			return -ENOMEM;
		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu comes from user
		 * space, do an extra quick check here to filter out an
		 * invalid value before calling smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

out:
	kfree(info.ctx);
	return err;
}

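/*
 * bpf_ctx_init() - import the optional user-supplied context. The return
 * is tri-state: NULL when no ctx_in/ctx_out was given (a valid case),
 * ERR_PTR() on failure, otherwise a zeroed max_size buffer with the user
 * bytes at the front. Callers must test with IS_ERR(), not !ptr.
 */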
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

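/*
 * convert___skb_to_skb() - apply a user-supplied struct __sk_buff to the
 * test skb. Only whitelisted fields (mark, priority, ifindex, cb, tstamp,
 * wire_len, gso_segs, gso_size) may be non-zero; the range_is_zero()
 * staircase below rejects everything else, so new __sk_buff fields stay
 * unsupported here until explicitly handled.
 */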
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

static struct proto bpf_dummy_proto = {
	.name   = "bpf_dummy",
	.owner  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

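/*
 * Hedged userspace sketch for the skb runner below (prog_fd and pkt are
 * placeholders; flags/cpu must be zero on this path). Only the __sk_buff
 * fields whitelisted by convert___skb_to_skb() may be non-zero in ctx:
 *
 *	struct __sk_buff ctx = { .mark = 42 };
 *	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = pkt,
 *		.data_size_in = sizeof(pkt),	// >= ETH_HLEN
 *		.ctx_in = &ctx,
 *		.ctx_size_in = sizeof(ctx),
 *		.repeat = 100,			// duration = average ns/run
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 */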
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* a BPF program can never convert a linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

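/*
 * The XDP runner builds an xdp_buff directly on the copied data (no skb)
 * and points xdp.rxq at the loopback device's queue 0. If the program
 * moved xdp.data or xdp.data_end (e.g. via bpf_xdp_adjust_head()/tail()),
 * the adjusted length is what gets copied back out through data_out.
 */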
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	u32 max_data_sz;
	void *data;
	int ret;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;
	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	/* XDP has extra tailroom as (most) drivers use a full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + headroom;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;
	xdp.frame_sz = headroom + max_data_sz + tailroom;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;
	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

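/*
 * The flow dissector runner open-codes a timing loop equivalent to
 * bpf_test_run() but invokes bpf_flow_dissect() directly; note this
 * 5.10 tree still uses preempt_disable() here where bpf_test_run()
 * uses migrate_disable().
 */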
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}