Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, its
 * verifier_ops->get_func_proto() callback should return
 * bpf_map_lookup_elem_proto, so that the verifier can properly check the
 * arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * methods; therefore eBPF programs must run under the RCU read lock when
 * they are allowed to access maps, so rcu_read_lock_held() is checked in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
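
/* Editor's note, an illustrative sketch (not part of the original file):
 * on the BPF program side the verifier wires this prototype to the
 * bpf_map_lookup_elem() helper. Assuming a hypothetical map "my_hash"
 * with __u64 values defined elsewhere:
 *
 *	__u32 key = 0;
 *	__u64 *val = bpf_map_lookup_elem(&my_hash, &key);
 *	if (val)	// RET_PTR_TO_MAP_VALUE_OR_NULL: must NULL-check
 *		__sync_fetch_and_add(val, 1);
 */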

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
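
/* Editor's note: per the shift above, the returned u64 packs tgid in the
 * upper 32 bits and pid in the lower 32 bits. Illustrative unpacking on
 * the BPF program side:
 *
 *	__u64 pid_tgid = bpf_get_current_pid_tgid();
 *	__u32 tgid = pid_tgid >> 32;	// userspace "PID" (thread group id)
 *	__u32 pid  = (__u32)pid_tgid;	// userspace "TID" (thread id)
 */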

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
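
/* Editor's note: symmetrically, the result packs gid in the upper 32 bits
 * and uid in the lower 32 bits, both translated into the initial user
 * namespace:
 *
 *	__u64 uid_gid = bpf_get_current_uid_gid();
 *	__u32 gid = uid_gid >> 32;
 *	__u32 uid = (__u32)uid_gid;
 */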

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
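
/* Editor's note, an illustrative call from a BPF program: task->comm is at
 * most TASK_COMM_LEN (16) bytes including the terminating NUL, so a
 * 16-byte buffer suffices:
 *
 *	char comm[16];
 *	if (bpf_get_current_comm(comm, sizeof(comm)) == 0)
 *		bpf_printk("comm=%s", comm);	// buf is NUL-terminated
 */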

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
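
/* Editor's note, an illustrative sketch of use from a BPF program: the
 * verifier only accepts these helpers on a struct bpf_spin_lock embedded
 * in a BTF-described map value. Assuming a hypothetical value type:
 *
 *	struct val { struct bpf_spin_lock lock; __u64 cnt; };
 *
 *	struct val *v = bpf_map_lookup_elem(&map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */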

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);
	struct cgroup *ancestor;

	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;
	return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is currently unused, but provides the ability
	 * to extend the API. The verifier checks that its value is valid.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage = NULL;
	void *ptr;
	int i;

	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
		break;
	}

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}
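
/* Editor's note, a worked example of the above: buf = "-0x1f", buf_len = 5,
 * flags = 0 (base 0 = auto-detect). There is no leading whitespace; '-'
 * sets *is_negative and makes consumed = 1; "0x1f" is copied into str;
 * _parse_integer_fixup_radix() picks base 16 and skips the "0x" prefix;
 * _parse_integer() yields *res = 31 with val_len = 2. The function returns
 * consumed = 5, the total number of characters parsed.
 */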

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
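
/* Editor's note, an illustrative call from a BPF program (e.g. parsing
 * text in a cgroup/sysctl program); flags carries the numeric base in its
 * low 5 bits, and the helper returns the number of characters consumed or
 * a negative error. Assuming buf/len point at program-readable text:
 *
 *	long val;
 *	if (bpf_strtol(buf, len, 10, &val) < 0)
 *		return 0;	// not a valid base-10 integer
 */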

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type      = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type      = ARG_CONST_SIZE,
};
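
/* Editor's note, an illustrative pairing: userspace typically obtains the
 * dev/ino of the pid namespace to match via stat("/proc/self/ns/pid") and
 * bakes them into the program, which then reads namespace-local ids:
 *
 *	struct bpf_pidns_info ns;
 *	if (bpf_get_ns_current_pid_tgid(dev, ino, &ns, sizeof(ns)) == 0)
 *		bpf_printk("pid=%u tgid=%u", ns.pid, ns.tgid);
 */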

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only       = true,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
	.arg2_type      = ARG_CONST_MAP_PTR,
	.arg3_type      = ARG_ANYTHING,
	.arg4_type      = ARG_PTR_TO_MEM,
	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};
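
/* Editor's note, an illustrative sketch of use from a BPF program, via a
 * percpu kernel symbol declared with __ksym (requires kernel BTF; the
 * helper returns NULL for an invalid cpu, per the OR_NULL return type):
 *
 *	extern const struct rq runqueues __ksym;
 *
 *	struct rq *rq = bpf_per_cpu_ptr(&runqueues, cpu);
 *	if (rq)
 *		bpf_printk("nr_running=%u", rq->nr_running);
 */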

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;

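/* Editor's note: the function below groups helpers by required privilege.
 * The first switch is available to unprivileged programs, the second
 * additionally requires bpf_capable(), and the last perfmon_capable().
 * Subsystems typically fall back to it from their own get_func_proto()
 * callback; a sketch with a hypothetical callback name:
 *
 *	static const struct bpf_func_proto *
 *	my_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		// subsystem-specific helpers first ...
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */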
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	default:
		return NULL;
	}
}