^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2016 Facebook
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <uapi/linux/btf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/bpf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/btf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/bpf_verifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/filter.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <net/netlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/stringify.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/bsearch.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/ctype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/error-injection.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/bpf_lsm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/btf_ids.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "disasm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) [_id] = & _name ## _verifier_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define BPF_MAP_TYPE(_id, _ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define BPF_LINK_TYPE(_id, _name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/bpf_types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #undef BPF_PROG_TYPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #undef BPF_MAP_TYPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #undef BPF_LINK_TYPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) /* bpf_check() is a static code analyzer that walks eBPF program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * instruction by instruction and updates register/stack state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * All paths of conditional branches are analyzed until 'bpf_exit' insn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * The first pass is depth-first-search to check that the program is a DAG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * It rejects the following programs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * - larger than BPF_MAXINSNS insns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * - if loop is present (detected via back-edge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * - unreachable insns exist (shouldn't be a forest; a program is one function)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * - out of bounds or malformed jumps
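 *
 * For instance (an illustrative fragment, not from any real program), a jump
 * whose target precedes it forms a back-edge and leaves the exit insn
 * unreachable, so the first pass rejects it:
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_JMP_A(-2),     // jumps back to the previous insn: back-edge
 *   BPF_EXIT_INSN(),   // never reached
 *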
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * The second pass is an all-possible-paths descent from the 1st insn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * Since it's analyzing all paths through the program, the length of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * analysis is limited to 64k insn, which may be hit even if the total number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * of insns is less than 4K, when there are too many branches that change stack/regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * Number of 'branches to be analyzed' is limited to 1k.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * On entry to each instruction, each register has a type, and the instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * changes the types of the registers depending on instruction semantics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * copied to R1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * All registers are 64-bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * R0 - return register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * R1-R5 argument passing registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * R6-R9 callee saved registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * R10 - frame pointer read-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * At the start of BPF program the register R1 contains a pointer to bpf_context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * and has type PTR_TO_CTX.
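 *
 * For example (an illustrative first insn; it is accepted only if the program
 * type's ->is_valid_access() callback allows a 4-byte read at that ctx offset):
 *   BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), // R1 is PTR_TO_CTX here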
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * The verifier tracks arithmetic operations on pointers. For example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * 1st insn copies R10 (which has FRAME_PTR) type into R1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * and 2nd arithmetic instruction is pattern matched to recognize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * that it wants to construct a pointer to some element within stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * So after 2nd insn, the register R1 has type PTR_TO_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * (and -20 constant is saved for further stack bounds checking).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * Meaning that this reg is a pointer to stack plus known immediate constant.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * Most of the time the registers have SCALAR_VALUE type, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * means the register has some value, but it's not a valid pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * (like pointer plus pointer becomes SCALAR_VALUE type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * When verifier sees load or store instructions the type of base register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * four pointer types recognized by check_mem_access() function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * and the range of [ptr, ptr + map's value_size) is accessible.
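 *
 * E.g. (illustrative), once R0 has type PTR_TO_MAP_VALUE:
 *   BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), // ok if value_size >= 8
 *   BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 8), // rejected if value_size < 16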
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * Registers used to pass values to function calls are checked against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * function argument constraints.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * It means that the register type passed to this function must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * PTR_TO_STACK and it will be used inside the function as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * 'pointer to map element key'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * For example the argument constraints for bpf_map_lookup_elem():
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * .arg1_type = ARG_CONST_MAP_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * .arg2_type = ARG_PTR_TO_MAP_KEY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * ret_type says that this function returns 'pointer to map elem value or null'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * and the 2nd argument to be a pointer to stack, which will be used inside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * the helper function as a pointer to map element key.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * On the kernel side the helper function looks like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * void *key = (void *) (unsigned long) r2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * void *value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * here kernel can access 'key' and 'map' pointers safely, knowing that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * [key, key + map->key_size) bytes are valid and were initialized on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * the stack of eBPF program.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * Corresponding eBPF program may look like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * here the verifier looks at the prototype of map_lookup_elem() and sees:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * Now the verifier knows that this map has a key of R1->map_ptr->key_size bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) * and were initialized prior to this call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) * If it's ok, then verifier allows this BPF_CALL insn and looks at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * R0->type = PTR_TO_MAP_VALUE_OR_NULL, which means the bpf_map_lookup_elem()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * helper returns either a pointer to the map value or NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) * insn, the register holding that pointer in the true branch changes state to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) * branch. See check_cond_jmp_op().
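 *
 * Continuing the example above (an illustrative fragment, assuming the map's
 * value_size is at least 8 bytes):
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),  // R0 is PTR_TO_MAP_VALUE_OR_NULL here
 *   BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),   // fall-through: R0 is PTR_TO_MAP_VALUE
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_EXIT_INSN(),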
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * After the call R0 is set to return type of the function and registers R1-R5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * are set to NOT_INIT to indicate that they are no longer readable.
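 *
 * So e.g. (illustrative) the second insn below is rejected, because R1 is no
 * longer readable after the call:
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *   BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),    // error: R1 !read_ok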
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * The following reference types represent a potential reference to a kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * resource which, after first being allocated, must be checked and freed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * the BPF program:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * When the verifier sees a helper call return a reference type, it allocates a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * pointer id for the reference and stores it in the current function state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) * passes through a NULL-check conditional. For the branch wherein the state is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * changed to CONST_IMM, the verifier releases the reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * For each helper function that allocates a reference, such as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * bpf_sk_release(). When a reference type passes into the release function,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) * the verifier also releases the reference. If any unchecked or unreleased
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * reference remains at the end of the program, the verifier rejects it.
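 *
 * A rough sketch of the expected acquire/check/release pattern (the argument
 * setup for the lookup helper is omitted for brevity):
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),  // R0 is PTR_TO_SOCKET_OR_NULL
 *   BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),    // fall-through: R0 is PTR_TO_SOCKET
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_EXIT_INSN(),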
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /* verifier_state + insn_idx are pushed to the stack when a branch is encountered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) struct bpf_verifier_stack_elem {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) /* verifier state is 'st'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * before processing instruction 'insn_idx'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * and after processing instruction 'prev_insn_idx'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) struct bpf_verifier_state st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) int insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) int prev_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) struct bpf_verifier_stack_elem *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) /* length of verifier log at the time this state was pushed on stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) u32 log_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) #define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) #define BPF_COMPLEXITY_LIMIT_STATES 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
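/* Per-call-site map key tracking, see bpf_map_key_store() below. Roughly: the
 * low bits of map_key_state hold the last constant key seen at a call site,
 * BPF_MAP_KEY_SEEN records that some key was observed at all, and
 * BPF_MAP_KEY_POISON marks that differing (or non-constant) keys were seen,
 * so that call site cannot be specialized on a single constant key.
 */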
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) #define BPF_MAP_KEY_POISON (1ULL << 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) #define BPF_MAP_KEY_SEEN (1ULL << 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
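/* Per-call-site map pointer tracking, see bpf_map_ptr_store() below. Roughly:
 * map_ptr_state remembers which map a helper call used, with BPF_MAP_PTR_UNPRIV
 * set in the low bit when the map cannot bypass Spectre v1 mitigations, and
 * BPF_MAP_PTR_POISON when different maps were seen at the same call site, which
 * prevents patching that call with a map-specific fast path.
 */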
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #define BPF_MAP_PTR_UNPRIV 1UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) POISON_POINTER_DELTA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) #define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) const struct bpf_map *map, bool unpriv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) unpriv |= bpf_map_ptr_unpriv(aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) aux->map_ptr_state = (unsigned long)map |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) return aux->map_key_state & BPF_MAP_KEY_POISON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) bool poisoned = bpf_map_key_poisoned(aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) aux->map_key_state = state | BPF_MAP_KEY_SEEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) struct bpf_call_arg_meta {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) struct bpf_map *map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) bool raw_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) bool pkt_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) int regno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) int access_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) int mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) u64 msize_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) int ref_obj_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) int func_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) u32 btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) u32 ret_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) struct btf *btf_vmlinux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) static DEFINE_MUTEX(bpf_verifier_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) static const struct bpf_line_info *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) const struct bpf_line_info *linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) const struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) u32 i, nr_linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) prog = env->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) nr_linfo = prog->aux->nr_linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) if (!nr_linfo || insn_off >= prog->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) linfo = prog->aux->linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) for (i = 1; i < nr_linfo; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) if (insn_off < linfo[i].insn_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) return &linfo[i - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) va_list args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) unsigned int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) "verifier log line truncated - local buffer too short\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) n = min(log->len_total - log->len_used - 1, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) log->kbuf[n] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) if (log->level == BPF_LOG_KERNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) pr_err("BPF:%s\n", log->kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) log->len_used += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) log->ubuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) char zero = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) if (!bpf_verifier_log_needed(log))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) log->len_used = new_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) if (put_user(zero, log->ubuf + new_pos))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) log->ubuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) /* log_level controls verbosity level of eBPF verifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * bpf_verifier_log_write() is used to dump the verification trace to the log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * so the user can figure out what's wrong with the program.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) if (!bpf_verifier_log_needed(&env->log))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) va_start(args, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) bpf_verifier_vlog(&env->log, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
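/* E.g. a hypothetical caller outside the verifier core could emit a trace line
 * with:
 *
 *	bpf_verifier_log_write(env, "invalid access at off %d\n", off);
 */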
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) struct bpf_verifier_env *env = private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) if (!bpf_verifier_log_needed(&env->log))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) va_start(args, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) bpf_verifier_vlog(&env->log, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) if (!bpf_verifier_log_needed(log))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) va_start(args, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) bpf_verifier_vlog(log, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) static const char *ltrim(const char *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) while (isspace(*s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) s++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) u32 insn_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) const char *prefix_fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) const struct bpf_line_info *linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if (!bpf_verifier_log_needed(&env->log))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) linfo = find_linfo(env, insn_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) if (!linfo || linfo == env->prev_linfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (prefix_fmt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) va_start(args, prefix_fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) bpf_verifier_vlog(&env->log, prefix_fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) verbose(env, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) ltrim(btf_name_by_offset(env->prog->aux->btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) linfo->line_off)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) env->prev_linfo = linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) static bool type_is_pkt_pointer(enum bpf_reg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) return type == PTR_TO_PACKET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) type == PTR_TO_PACKET_META;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) static bool type_is_sk_pointer(enum bpf_reg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) return type == PTR_TO_SOCKET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) type == PTR_TO_SOCK_COMMON ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) type == PTR_TO_TCP_SOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) type == PTR_TO_XDP_SOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) static bool reg_type_not_null(enum bpf_reg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) return type == PTR_TO_SOCKET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) type == PTR_TO_TCP_SOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) type == PTR_TO_MAP_VALUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) type == PTR_TO_SOCK_COMMON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) static bool reg_type_may_be_null(enum bpf_reg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) return type == PTR_TO_MAP_VALUE_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) type == PTR_TO_SOCKET_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) type == PTR_TO_SOCK_COMMON_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) type == PTR_TO_TCP_SOCK_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) type == PTR_TO_BTF_ID_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) type == PTR_TO_MEM_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) type == PTR_TO_RDONLY_BUF_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) type == PTR_TO_RDWR_BUF_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) return reg->type == PTR_TO_MAP_VALUE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) map_value_has_spin_lock(reg->map_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) return type == PTR_TO_SOCKET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) type == PTR_TO_SOCKET_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) type == PTR_TO_TCP_SOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) type == PTR_TO_TCP_SOCK_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) type == PTR_TO_MEM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) type == PTR_TO_MEM_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) return type == ARG_PTR_TO_SOCK_COMMON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) static bool arg_type_may_be_null(enum bpf_arg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) type == ARG_PTR_TO_MEM_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) type == ARG_PTR_TO_CTX_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) type == ARG_PTR_TO_SOCKET_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) /* Determine whether the function releases some resources allocated by another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * function call. The first reference type argument will be assumed to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * released by release_reference().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) static bool is_release_function(enum bpf_func_id func_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) return func_id == BPF_FUNC_sk_release ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) func_id == BPF_FUNC_ringbuf_submit ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) func_id == BPF_FUNC_ringbuf_discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) static bool may_be_acquire_function(enum bpf_func_id func_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return func_id == BPF_FUNC_sk_lookup_tcp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) func_id == BPF_FUNC_sk_lookup_udp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) func_id == BPF_FUNC_skc_lookup_tcp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) func_id == BPF_FUNC_map_lookup_elem ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) func_id == BPF_FUNC_ringbuf_reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) static bool is_acquire_function(enum bpf_func_id func_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) if (func_id == BPF_FUNC_sk_lookup_tcp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) func_id == BPF_FUNC_sk_lookup_udp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) func_id == BPF_FUNC_skc_lookup_tcp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) func_id == BPF_FUNC_ringbuf_reserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (func_id == BPF_FUNC_map_lookup_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) (map_type == BPF_MAP_TYPE_SOCKMAP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) map_type == BPF_MAP_TYPE_SOCKHASH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) static bool is_ptr_cast_function(enum bpf_func_id func_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) return func_id == BPF_FUNC_tcp_sock ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) func_id == BPF_FUNC_sk_fullsock ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) func_id == BPF_FUNC_skc_to_tcp_sock ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) func_id == BPF_FUNC_skc_to_tcp6_sock ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) func_id == BPF_FUNC_skc_to_udp6_sock ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) func_id == BPF_FUNC_skc_to_tcp_request_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) /* string representation of 'enum bpf_reg_type' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) static const char * const reg_type_str[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) [NOT_INIT] = "?",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) [SCALAR_VALUE] = "inv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) [PTR_TO_CTX] = "ctx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) [CONST_PTR_TO_MAP] = "map_ptr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) [PTR_TO_MAP_VALUE] = "map_value",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) [PTR_TO_STACK] = "fp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) [PTR_TO_PACKET] = "pkt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) [PTR_TO_PACKET_META] = "pkt_meta",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) [PTR_TO_PACKET_END] = "pkt_end",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) [PTR_TO_FLOW_KEYS] = "flow_keys",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) [PTR_TO_SOCKET] = "sock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) [PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) [PTR_TO_SOCK_COMMON] = "sock_common",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) [PTR_TO_TCP_SOCK] = "tcp_sock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) [PTR_TO_TP_BUFFER] = "tp_buffer",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) [PTR_TO_XDP_SOCK] = "xdp_sock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) [PTR_TO_BTF_ID] = "ptr_",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) [PTR_TO_MEM] = "mem",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) [PTR_TO_MEM_OR_NULL] = "mem_or_null",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) [PTR_TO_RDONLY_BUF] = "rdonly_buf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) [PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) [PTR_TO_RDWR_BUF] = "rdwr_buf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) [PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) static char slot_type_char[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) [STACK_INVALID] = '?',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) [STACK_SPILL] = 'r',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) [STACK_MISC] = 'm',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) [STACK_ZERO] = '0',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) static void print_liveness(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) enum bpf_reg_liveness live)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) verbose(env, "_");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (live & REG_LIVE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) verbose(env, "r");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) if (live & REG_LIVE_WRITTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) verbose(env, "w");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) if (live & REG_LIVE_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) verbose(env, "D");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) static struct bpf_func_state *func(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) const struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) struct bpf_verifier_state *cur = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return cur->frame[reg->frameno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) const char *kernel_type_name(u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) return btf_name_by_offset(btf_vmlinux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) btf_type_by_id(btf_vmlinux, id)->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) static void print_verifier_state(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) const struct bpf_func_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) const struct bpf_reg_state *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) enum bpf_reg_type t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) if (state->frameno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) verbose(env, " frame%d:", state->frameno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) for (i = 0; i < MAX_BPF_REG; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) reg = &state->regs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) t = reg->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) if (t == NOT_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) verbose(env, " R%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) print_liveness(env, reg->live);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) verbose(env, "=%s", reg_type_str[t]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (t == SCALAR_VALUE && reg->precise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) verbose(env, "P");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) tnum_is_const(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) /* reg->off should be 0 for SCALAR_VALUE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) verbose(env, "%lld", reg->var_off.value + reg->off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (t == PTR_TO_BTF_ID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) t == PTR_TO_BTF_ID_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) t == PTR_TO_PERCPU_BTF_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) verbose(env, "%s", kernel_type_name(reg->btf_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) verbose(env, "(id=%d", reg->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) if (reg_type_may_be_refcounted_or_null(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) if (t != SCALAR_VALUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) verbose(env, ",off=%d", reg->off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) if (type_is_pkt_pointer(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) verbose(env, ",r=%d", reg->range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) else if (t == CONST_PTR_TO_MAP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) t == PTR_TO_MAP_VALUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) t == PTR_TO_MAP_VALUE_OR_NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) verbose(env, ",ks=%d,vs=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) reg->map_ptr->key_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) reg->map_ptr->value_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (tnum_is_const(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) /* Typically an immediate SCALAR_VALUE, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) * could be a pointer whose offset is too big
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) * for reg->off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) verbose(env, ",imm=%llx", reg->var_off.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (reg->smin_value != reg->umin_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) reg->smin_value != S64_MIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) verbose(env, ",smin_value=%lld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) (long long)reg->smin_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (reg->smax_value != reg->umax_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) reg->smax_value != S64_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) verbose(env, ",smax_value=%lld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) (long long)reg->smax_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (reg->umin_value != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) verbose(env, ",umin_value=%llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) (unsigned long long)reg->umin_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (reg->umax_value != U64_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) verbose(env, ",umax_value=%llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) (unsigned long long)reg->umax_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) if (!tnum_is_unknown(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) verbose(env, ",var_off=%s", tn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) if (reg->s32_min_value != reg->smin_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) reg->s32_min_value != S32_MIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) verbose(env, ",s32_min_value=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) (int)(reg->s32_min_value));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (reg->s32_max_value != reg->smax_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) reg->s32_max_value != S32_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) verbose(env, ",s32_max_value=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) (int)(reg->s32_max_value));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (reg->u32_min_value != reg->umin_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) reg->u32_min_value != U32_MIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) verbose(env, ",u32_min_value=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) (int)(reg->u32_min_value));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) if (reg->u32_max_value != reg->umax_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) reg->u32_max_value != U32_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) verbose(env, ",u32_max_value=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) (int)(reg->u32_max_value));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) verbose(env, ")");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) char types_buf[BPF_REG_SIZE + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) bool valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) for (j = 0; j < BPF_REG_SIZE; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (state->stack[i].slot_type[j] != STACK_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) types_buf[j] = slot_type_char[
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) state->stack[i].slot_type[j]];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) types_buf[BPF_REG_SIZE] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (!valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) print_liveness(env, state->stack[i].spilled_ptr.live);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (state->stack[i].slot_type[0] == STACK_SPILL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) reg = &state->stack[i].spilled_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) t = reg->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) verbose(env, "=%s", reg_type_str[t]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) if (t == SCALAR_VALUE && reg->precise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) verbose(env, "P");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) verbose(env, "%lld", reg->var_off.value + reg->off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) verbose(env, "=%s", types_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (state->acquired_refs && state->refs[0].id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) verbose(env, " refs=%d", state->refs[0].id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) for (i = 1; i < state->acquired_refs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (state->refs[i].id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) verbose(env, ",%d", state->refs[i].id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) verbose(env, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) static int copy_##NAME##_state(struct bpf_func_state *dst, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) const struct bpf_func_state *src) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) if (!src->FIELD) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) return 0; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) /* internal bug, make state invalid to reject the program */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) memset(dst, 0, sizeof(*dst)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) return -EFAULT; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) memcpy(dst->FIELD, src->FIELD, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) return 0; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) /* copy_reference_state() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) COPY_STATE_FN(reference, acquired_refs, refs, 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) /* copy_stack_state() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) #undef COPY_STATE_FN
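/* Roughly, COPY_STATE_FN(reference, acquired_refs, refs, 1) above expands to:
 *
 *	static int copy_reference_state(struct bpf_func_state *dst,
 *					const struct bpf_func_state *src)
 *	{
 *		if (!src->refs)
 *			return 0;
 *		if (WARN_ON_ONCE(dst->acquired_refs < src->acquired_refs)) {
 *			memset(dst, 0, sizeof(*dst));
 *			return -EFAULT;
 *		}
 *		memcpy(dst->refs, src->refs,
 *		       sizeof(*src->refs) * src->acquired_refs);
 *		return 0;
 *	}
 */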
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) bool copy_old) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) u32 old_size = state->COUNT; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) struct bpf_##NAME##_state *new_##FIELD; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) int slot = size / SIZE; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (size <= old_size || !size) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (copy_old) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) return 0; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) state->COUNT = slot * SIZE; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (!size && old_size) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) kfree(state->FIELD); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) state->FIELD = NULL; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) return 0; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) GFP_KERNEL); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (!new_##FIELD) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) return -ENOMEM; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (copy_old) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (state->FIELD) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) memcpy(new_##FIELD, state->FIELD, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) sizeof(*new_##FIELD) * (old_size / SIZE)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) memset(new_##FIELD + old_size / SIZE, 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) state->COUNT = slot * SIZE; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) kfree(state->FIELD); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) state->FIELD = new_##FIELD; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) return 0; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) /* realloc_reference_state() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /* realloc_stack_state() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) #undef REALLOC_STATE_FN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) /* do_check() starts with a zero-sized stack in struct bpf_verifier_state to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * make it consume a minimal amount of memory. Stack writes from the program,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * seen by check_stack_write(), call into realloc_func_state() to grow the stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * which realloc_stack_state() copies over. It points to the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * bpf_verifier_state, which is never reallocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static int realloc_func_state(struct bpf_func_state *state, int stack_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) int refs_size, bool copy_old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) int err = realloc_reference_state(state, refs_size, copy_old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return realloc_stack_state(state, stack_size, copy_old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* Acquire a pointer id from the env and update state->refs to include
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * this new pointer reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * On success, returns a valid pointer id to associate with the register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * On failure, returns a negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) struct bpf_func_state *state = cur_func(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) int new_ofs = state->acquired_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) int id, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) err = realloc_reference_state(state, state->acquired_refs + 1, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) id = ++env->id_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) state->refs[new_ofs].id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) state->refs[new_ofs].insn_idx = insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) return id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
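
/* Usage sketch (illustrative, simplified): when the verifier models a helper
 * that returns a referenced object, e.g. bpf_sk_lookup_tcp(), the helper-call
 * handling roughly does
 *
 *	id = acquire_reference_state(env, insn_idx);
 *	if (id < 0)
 *		return id;
 *	regs[BPF_REG_0].id = id;
 *	regs[BPF_REG_0].ref_obj_id = id;
 *
 * and the matching release helper later feeds the same id back into
 * release_reference_state(), so a program that leaks the reference is
 * rejected when it tries to exit.
 */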
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) /* release function corresponding to acquire_reference_state(). Idempotent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) static int release_reference_state(struct bpf_func_state *state, int ptr_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int i, last_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) last_idx = state->acquired_refs - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) for (i = 0; i < state->acquired_refs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (state->refs[i].id == ptr_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (last_idx && i != last_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) memcpy(&state->refs[i], &state->refs[last_idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) sizeof(*state->refs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) memset(&state->refs[last_idx], 0, sizeof(*state->refs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) state->acquired_refs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static int transfer_reference_state(struct bpf_func_state *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct bpf_func_state *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) int err = realloc_reference_state(dst, src->acquired_refs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) err = copy_reference_state(dst, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
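
/* Illustrative note: transfer_reference_state() keeps acquired references
 * attached to the frame that is currently executing. Simplified flow:
 *
 *	transfer_reference_state(callee, caller);   // on a bpf-to-bpf call
 *	transfer_reference_state(caller, callee);   // on return from the callee
 *
 * so a reference acquired inside a subprogram still has to be released
 * somewhere before the program finally exits.
 */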
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static void free_func_state(struct bpf_func_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (!state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) kfree(state->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) kfree(state->stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) kfree(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) static void clear_jmp_history(struct bpf_verifier_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) kfree(state->jmp_history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) state->jmp_history = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) state->jmp_history_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static void free_verifier_state(struct bpf_verifier_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) bool free_self)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) for (i = 0; i <= state->curframe; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) free_func_state(state->frame[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) state->frame[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) clear_jmp_history(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (free_self)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) kfree(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) /* copy func state from src to dst, growing dst stack space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * when necessary to accommodate a larger src stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) static int copy_func_state(struct bpf_func_state *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) const struct bpf_func_state *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) err = copy_reference_state(dst, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return copy_stack_state(dst, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static int copy_verifier_state(struct bpf_verifier_state *dst_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) const struct bpf_verifier_state *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct bpf_func_state *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) kfree(dst_state->jmp_history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (!dst_state->jmp_history)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) dst_state->jmp_history_cnt = src->jmp_history_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* if dst has more stack frames than src, free them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) free_func_state(dst_state->frame[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) dst_state->frame[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) dst_state->speculative = src->speculative;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) dst_state->curframe = src->curframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) dst_state->active_spin_lock = src->active_spin_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) dst_state->branches = src->branches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) dst_state->parent = src->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) dst_state->first_insn_idx = src->first_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) dst_state->last_insn_idx = src->last_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) for (i = 0; i <= src->curframe; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dst = dst_state->frame[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (!dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) dst = kzalloc(sizeof(*dst), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (!dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) dst_state->frame[i] = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) err = copy_func_state(dst, src->frame[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) while (st) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) u32 br = --st->branches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /* WARN_ON(br > 1) technically makes sense here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * but see comment in push_stack(), hence:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) WARN_ONCE((int)br < 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) "BUG update_branch_counts:branches_to_explore=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) br);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (br)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) st = st->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
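
/* Worked example (hypothetical): suppose state A was left with branches == 2
 * after pushing its two successors. When the first of those paths finishes
 * verification, the call here drops A->branches to 1 and stops walking,
 * because the other path is still outstanding. When the second path also
 * completes, A->branches reaches 0 and the loop continues to A->parent,
 * decrementing it in turn.
 */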
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) int *insn_idx, bool pop_log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct bpf_verifier_state *cur = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct bpf_verifier_stack_elem *elem, *head = env->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (env->head == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (cur) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) err = copy_verifier_state(cur, &head->st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (pop_log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) bpf_vlog_reset(&env->log, head->log_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) *insn_idx = head->insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (prev_insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) *prev_insn_idx = head->prev_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) elem = head->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) free_verifier_state(&head->st, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) kfree(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) env->head = elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) env->stack_size--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) int insn_idx, int prev_insn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) bool speculative)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct bpf_verifier_state *cur = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct bpf_verifier_stack_elem *elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (!elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) elem->insn_idx = insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) elem->prev_insn_idx = prev_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) elem->next = env->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) elem->log_pos = env->log.len_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) env->head = elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) env->stack_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) err = copy_verifier_state(&elem->st, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) elem->st.speculative |= speculative;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) verbose(env, "The sequence of %d jumps is too complex.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) env->stack_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (elem->st.parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ++elem->st.parent->branches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* WARN_ON(branches > 2) technically makes sense here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * 1. speculative states will bump 'branches' for non-branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * 2. is_state_visited() heuristics may decide not to create
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * a new state for a sequence of branches and all such current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * and cloned states will be pointing to a single parent state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * which might have large 'branches' count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return &elem->st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) free_verifier_state(env->cur_state, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) env->cur_state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* pop all elements and return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) while (!pop_stack(env, NULL, NULL, false));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
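
/* Exploration sketch (illustrative): for a conditional jump where both
 * outcomes remain possible, the jump handling roughly does
 *
 *	other_branch = push_stack(env, *insn_idx + insn->off + 1,
 *				  *insn_idx, false);
 *	if (!other_branch)
 *		return -EFAULT;
 *
 * and keeps analyzing the fall-through path in env->cur_state. When that
 * path terminates, do_check() calls pop_stack() to restore the saved state
 * and resumes at the recorded insn_idx, giving a depth-first walk over all
 * branches.
 */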
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) #define CALLER_SAVED_REGS 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static const int caller_saved[CALLER_SAVED_REGS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static void __mark_reg_not_init(const struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct bpf_reg_state *reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /* This helper doesn't clear reg->id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) reg->var_off = tnum_const(imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) reg->smin_value = (s64)imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) reg->smax_value = (s64)imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) reg->umin_value = imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) reg->umax_value = imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) reg->s32_min_value = (s32)imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) reg->s32_max_value = (s32)imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) reg->u32_min_value = (u32)imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) reg->u32_max_value = (u32)imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
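
/* Worked example: ___mark_reg_known(reg, 5) leaves reg->var_off as the
 * constant tnum {.value = 5, .mask = 0} (every bit known) and sets all of
 * smin/smax/umin/umax and their 32-bit counterparts to 5. Callers that also
 * need id, off and the map_ptr/range union cleared use __mark_reg_known()
 * below instead.
 */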
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /* Mark the unknown part of a register (variable offset or scalar value) as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * known to have the value @imm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /* Clear id, off, and union(map_ptr, range) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) memset(((u8 *)reg) + sizeof(reg->type), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ___mark_reg_known(reg, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) reg->var_off = tnum_const_subreg(reg->var_off, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) reg->s32_min_value = (s32)imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) reg->s32_max_value = (s32)imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) reg->u32_min_value = (u32)imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) reg->u32_max_value = (u32)imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /* Mark the 'variable offset' part of a register as zero. This should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * used only on registers holding a pointer type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static void __mark_reg_known_zero(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) __mark_reg_known(reg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void __mark_reg_const_zero(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) __mark_reg_known(reg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) reg->type = SCALAR_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static void mark_reg_known_zero(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct bpf_reg_state *regs, u32 regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (WARN_ON(regno >= MAX_BPF_REG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* Something bad happened, let's kill all regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) for (regno = 0; regno < MAX_BPF_REG; regno++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) __mark_reg_not_init(env, regs + regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) __mark_reg_known_zero(regs + regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return type_is_pkt_pointer(reg->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return reg_is_pkt_pointer(reg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) reg->type == PTR_TO_PACKET_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) enum bpf_reg_type which)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /* The register can already have a range from prior markings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * This is fine as long as it hasn't been advanced from its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * origin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return reg->type == which &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) reg->id == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) reg->off == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) tnum_equals_const(reg->var_off, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /* Reset the min/max bounds of a register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static void __mark_reg_unbounded(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) reg->umin_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) reg->umax_value = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) reg->s32_min_value = S32_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) reg->s32_max_value = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) reg->u32_min_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) reg->u32_max_value = U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) reg->umin_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) reg->umax_value = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) reg->s32_min_value = S32_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) reg->s32_max_value = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) reg->u32_min_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) reg->u32_max_value = U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static void __update_reg32_bounds(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct tnum var32_off = tnum_subreg(reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* min signed is max(sign bit) | min(other bits) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) reg->s32_min_value = max_t(s32, reg->s32_min_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) var32_off.value | (var32_off.mask & S32_MIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /* max signed is min(sign bit) | max(other bits) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) reg->s32_max_value = min_t(s32, reg->s32_max_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) var32_off.value | (var32_off.mask & S32_MAX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) reg->u32_max_value = min(reg->u32_max_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) (u32)(var32_off.value | var32_off.mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
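
/* Worked example: with var32_off = {.value = 0, .mask = 0x3} (only the two
 * low bits unknown) and previously unbounded 32-bit ranges, the code above
 * tightens both the u32 and the s32 bounds to [0, 3], since
 * value | (mask & S32_MIN) == 0 and value | (mask & S32_MAX) == 3.
 */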
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static void __update_reg64_bounds(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /* min signed is max(sign bit) | min(other bits) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) reg->smin_value = max_t(s64, reg->smin_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) reg->var_off.value | (reg->var_off.mask & S64_MIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* max signed is min(sign bit) | max(other bits) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) reg->smax_value = min_t(s64, reg->smax_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) reg->var_off.value | (reg->var_off.mask & S64_MAX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) reg->umin_value = max(reg->umin_value, reg->var_off.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) reg->umax_value = min(reg->umax_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) reg->var_off.value | reg->var_off.mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static void __update_reg_bounds(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) __update_reg32_bounds(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) __update_reg64_bounds(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* Uses signed min/max values to inform unsigned, and vice-versa */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* Learn sign from signed bounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * If we cannot cross the sign boundary, then signed and unsigned bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * are the same, so combine. This works even in the negative case, e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) reg->s32_min_value = reg->u32_min_value =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) max_t(u32, reg->s32_min_value, reg->u32_min_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) reg->s32_max_value = reg->u32_max_value =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) min_t(u32, reg->s32_max_value, reg->u32_max_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /* Learn sign from unsigned bounds. Signed bounds cross the sign
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * boundary, so we must be careful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if ((s32)reg->u32_max_value >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* Positive. We can't learn anything from the smin, but smax
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * is positive, hence safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) reg->s32_min_value = reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) reg->s32_max_value = reg->u32_max_value =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) min_t(u32, reg->s32_max_value, reg->u32_max_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) } else if ((s32)reg->u32_min_value < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /* Negative. We can't learn anything from the smax, but smin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * is negative, hence safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) reg->s32_min_value = reg->u32_min_value =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) max_t(u32, reg->s32_min_value, reg->u32_min_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) reg->s32_max_value = reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Learn sign from signed bounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * If we cannot cross the sign boundary, then signed and unsigned bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * are the same, so combine. This works even in the negative case, e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (reg->smin_value >= 0 || reg->smax_value < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) reg->umin_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) reg->umax_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /* Learn sign from unsigned bounds. Signed bounds cross the sign
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * boundary, so we must be careful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if ((s64)reg->umax_value >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /* Positive. We can't learn anything from the smin, but smax
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * is positive, hence safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) reg->smin_value = reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) reg->umax_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) } else if ((s64)reg->umin_value < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /* Negative. We can't learn anything from the smax, but smin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * is negative, hence safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) reg->umin_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) reg->smax_value = reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
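
/* Worked example: with umin_value == 1, umax_value == 5 and the signed
 * bounds still at S64_MIN/S64_MAX, (s64)umax_value >= 0 holds, so the
 * "positive" branch above sets smin_value = 1 and smax_value = 5: an
 * unsigned range that cannot reach the sign bit can be adopted as the
 * signed range as well.
 */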
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static void __reg_deduce_bounds(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) __reg32_deduce_bounds(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) __reg64_deduce_bounds(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /* Attempts to improve var_off based on unsigned min/max information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static void __reg_bound_offset(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) struct tnum var64_off = tnum_intersect(reg->var_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) tnum_range(reg->umin_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) reg->umax_value));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) tnum_range(reg->u32_min_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) reg->u32_max_value));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
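
/* Worked example: umin_value == 0 and umax_value == 3 give
 * tnum_range(0, 3) == {.value = 0, .mask = 0x3}, i.e. only the two low bits
 * may be set. Intersecting that with the current var_off can only turn more
 * bits into known zeros, never lose information, and the tnum_subreg()/
 * tnum_clear_subreg() split keeps the 32-bit refinement independent of the
 * 64-bit one.
 */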
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static bool __reg32_bound_s64(s32 a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return a >= 0 && a <= S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) reg->umin_value = reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) reg->umax_value = reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /* Attempt to pull 32-bit signed bounds into 64-bit bounds, but they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * must be positive; otherwise set worst-case bounds and refine them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * later from the tnum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (__reg32_bound_s64(reg->s32_min_value) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) __reg32_bound_s64(reg->s32_max_value)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) reg->smin_value = reg->s32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) reg->smax_value = reg->s32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) reg->smin_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) reg->smax_value = U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* special case when the 64-bit register has its upper 32 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * zeroed. Typically happens after a zext or <<32, >>32 sequence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * allowing us to use the 32-bit bounds directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) __reg_assign_32_into_64(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /* Otherwise the best we can do is push the lower 32-bit known
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * and unknown bits into the register (var_off was set by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * jmp logic) and then learn as much as possible from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * 64-bit tnum's known and unknown bits. The previous smin/smax
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * bounds are invalid here because of the jmp32 compare, so mark
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * them unknown so they do not impact the tnum bounds calculation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) __mark_reg64_unbounded(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) __update_reg_bounds(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /* Intersecting with the old var_off might have improved our bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * then new var_off is (0; 0x7f...fc) which improves our umax.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) __reg_deduce_bounds(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) __reg_bound_offset(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) __update_reg_bounds(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static bool __reg64_bound_s32(s64 a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return a >= S32_MIN && a <= S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static bool __reg64_bound_u32(u64 a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return a >= U32_MIN && a <= U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) __mark_reg32_unbounded(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) reg->s32_min_value = (s32)reg->smin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) reg->s32_max_value = (s32)reg->smax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) reg->u32_min_value = (u32)reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) reg->u32_max_value = (u32)reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* Intersecting with the old var_off might have improved our bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * then new var_off is (0; 0x7f...fc) which improves our umax.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) __reg_deduce_bounds(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) __reg_bound_offset(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) __update_reg_bounds(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /* Mark a register as having a completely unknown (scalar) value. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static void __mark_reg_unknown(const struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * Clear type, id, off, and union(map_ptr, range) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * padding between 'type' and union
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) reg->type = SCALAR_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) reg->var_off = tnum_unknown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) reg->frameno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) __mark_reg_unbounded(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) static void mark_reg_unknown(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct bpf_reg_state *regs, u32 regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (WARN_ON(regno >= MAX_BPF_REG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /* Something bad happened, let's kill all regs except FP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) for (regno = 0; regno < BPF_REG_FP; regno++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) __mark_reg_not_init(env, regs + regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) __mark_reg_unknown(env, regs + regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) static void __mark_reg_not_init(const struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) __mark_reg_unknown(env, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) reg->type = NOT_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static void mark_reg_not_init(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct bpf_reg_state *regs, u32 regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (WARN_ON(regno >= MAX_BPF_REG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /* Something bad happened, let's kill all regs except FP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) for (regno = 0; regno < BPF_REG_FP; regno++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) __mark_reg_not_init(env, regs + regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) __mark_reg_not_init(env, regs + regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static void mark_btf_ld_reg(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct bpf_reg_state *regs, u32 regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) enum bpf_reg_type reg_type, u32 btf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (reg_type == SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) mark_reg_unknown(env, regs, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) mark_reg_known_zero(env, regs, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) regs[regno].type = PTR_TO_BTF_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) regs[regno].btf_id = btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) #define DEF_NOT_SUBREG (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static void init_reg_state(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct bpf_func_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) struct bpf_reg_state *regs = state->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) for (i = 0; i < MAX_BPF_REG; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) mark_reg_not_init(env, regs, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) regs[i].live = REG_LIVE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) regs[i].parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) regs[i].subreg_def = DEF_NOT_SUBREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* frame pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) regs[BPF_REG_FP].type = PTR_TO_STACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) mark_reg_known_zero(env, regs, BPF_REG_FP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) regs[BPF_REG_FP].frameno = state->frameno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) #define BPF_MAIN_FUNC (-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static void init_func_state(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct bpf_func_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) int callsite, int frameno, int subprogno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) state->callsite = callsite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) state->frameno = frameno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) state->subprogno = subprogno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) init_reg_state(env, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) enum reg_arg_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) SRC_OP, /* register is used as source operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) DST_OP, /* register is used as destination operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) DST_OP_NO_MARK /* same as above, check only, don't mark */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static int cmp_subprogs(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) return ((struct bpf_subprog_info *)a)->start -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) ((struct bpf_subprog_info *)b)->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static int find_subprog(struct bpf_verifier_env *env, int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct bpf_subprog_info *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) p = bsearch(&off, env->subprog_info, env->subprog_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) sizeof(env->subprog_info[0]), cmp_subprogs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return p - env->subprog_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static int add_subprog(struct bpf_verifier_env *env, int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (off >= insn_cnt || off < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) verbose(env, "call to invalid destination\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) ret = find_subprog(env, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) verbose(env, "too many subprograms\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) env->subprog_info[env->subprog_cnt++].start = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) sort(env->subprog_info, env->subprog_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) static int check_subprogs(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) struct bpf_subprog_info *subprog = env->subprog_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) struct bpf_insn *insn = env->prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /* Add entry function. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) ret = add_subprog(env, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /* determine subprog starts. The end is one before the next starts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) for (i = 0; i < insn_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (insn[i].code != (BPF_JMP | BPF_CALL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (insn[i].src_reg != BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (!env->bpf_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) "function calls to other bpf functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) ret = add_subprog(env, i + insn[i].imm + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /* Add a fake 'exit' subprog to simplify subprog iteration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * logic. 'subprog_cnt' is not increased for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) subprog[env->subprog_cnt].start = insn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (env->log.level & BPF_LOG_LEVEL2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) for (i = 0; i < env->subprog_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) verbose(env, "func#%d @%d\n", i, subprog[i].start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /* now check that all jumps are within the same subprog */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) subprog_start = subprog[cur_subprog].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) subprog_end = subprog[cur_subprog + 1].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) for (i = 0; i < insn_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) u8 code = insn[i].code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (code == (BPF_JMP | BPF_CALL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) insn[i].imm == BPF_FUNC_tail_call &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) insn[i].src_reg != BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) subprog[cur_subprog].has_tail_call = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (BPF_CLASS(code) == BPF_LD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) subprog[cur_subprog].has_ld_abs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) off = i + insn[i].off + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (off < subprog_start || off >= subprog_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) verbose(env, "jump out of range from insn %d to %d\n", i, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (i == subprog_end - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /* to avoid fall-through from one subprog into another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * the last insn of the subprog should be either exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * or unconditional jump back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if (code != (BPF_JMP | BPF_EXIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) code != (BPF_JMP | BPF_JA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) verbose(env, "last insn is not an exit or jmp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) subprog_start = subprog_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) cur_subprog++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (cur_subprog < env->subprog_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) subprog_end = subprog[cur_subprog + 1].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
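/* A minimal sketch of how the boundaries above are derived, assuming a
 * hypothetical six-insn program with one bpf-to-bpf call:
 *
 *   0: r1 = 1
 *   1: call pc+2     // BPF_PSEUDO_CALL, target = 1 + 2 + 1 = 4
 *   2: r0 = 0
 *   3: exit
 *   4: r0 = r1       // start of subprog #1
 *   5: exit
 *
 * check_subprogs() records starts {0, 4} plus the fake 'exit' subprog at
 * insn_cnt (6), so subprog #0 covers insns [0, 4) and subprog #1 covers
 * [4, 6). Both end in 'exit', and no jump may cross those boundaries.
 */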
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /* Parentage chain of this register (or stack slot) should take care of all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * issues like callee-saved registers, stack slot allocation time, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) static int mark_reg_read(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) const struct bpf_reg_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) struct bpf_reg_state *parent, u8 flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) bool writes = parent == state->parent; /* Observe write marks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) int cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) while (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /* if read wasn't screened by an earlier write ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (writes && state->live & REG_LIVE_WRITTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (parent->live & REG_LIVE_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) verbose(env, "verifier BUG type %s var_off %lld off %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) reg_type_str[parent->type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) parent->var_off.value, parent->off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /* The first condition is more likely to be true than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * second, so check it first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if ((parent->live & REG_LIVE_READ) == flag ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) parent->live & REG_LIVE_READ64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /* The parentage chain never changes and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * this parent was already marked as LIVE_READ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * There is no need to keep walking the chain again and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * keep re-marking all parents as LIVE_READ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) * This case happens when the same register is read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * multiple times without writes into it in-between.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * Also, if parent has the stronger REG_LIVE_READ64 set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * then no need to set the weak REG_LIVE_READ32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) /* ... then we depend on parent's value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) parent->live |= flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (flag == REG_LIVE_READ64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) parent->live &= ~REG_LIVE_READ32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) state = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) parent = state->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) writes = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) if (env->longest_mark_read_walk < cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) env->longest_mark_read_walk = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
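/* For example (hypothetical states): if a child state reads r6 as a 64-bit
 * source and r6 was not written in that state, REG_LIVE_READ64 is propagated
 * into r6 of each ancestor state, stopping early at an ancestor that either
 * wrote r6 (REG_LIVE_WRITTEN screens the read) or already carries an equal
 * or stronger read mark.
 */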
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) /* This function is supposed to be used by the following 32-bit optimization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) * code only. It returns TRUE if the source or destination register operates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * on 64 bits, otherwise it returns FALSE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) u8 code, class, op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) code = insn->code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) class = BPF_CLASS(code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) op = BPF_OP(code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (class == BPF_JMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /* BPF_EXIT for "main" will reach here. Return TRUE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * conservatively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (op == BPF_EXIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (op == BPF_CALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /* BPF to BPF call will reach here because of marking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * caller saved clobber with DST_OP_NO_MARK, for which we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * don't care about the register def because it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) * already marked as NOT_INIT anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (insn->src_reg == BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /* Helper call will reach here because of arg type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * check, conservatively return TRUE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (t == SRC_OP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (class == BPF_ALU64 || class == BPF_JMP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) /* BPF_END always uses the BPF_ALU class. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) (class == BPF_ALU && op == BPF_END && insn->imm == 64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (class == BPF_ALU || class == BPF_JMP32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (class == BPF_LDX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (t != SRC_OP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) return BPF_SIZE(code) == BPF_DW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /* LDX source must be ptr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (class == BPF_STX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (reg->type != SCALAR_VALUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) return BPF_SIZE(code) == BPF_DW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (class == BPF_LD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) u8 mode = BPF_MODE(code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) /* LD_IMM64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (mode == BPF_IMM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) /* Both LD_IND and LD_ABS return 32-bit data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (t != SRC_OP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) /* Implicit ctx ptr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (regno == BPF_REG_6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) /* Explicit source could be any width. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (class == BPF_ST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /* The only source register for BPF_ST is a ptr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) /* Conservatively return true at default. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
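/* A few illustrative cases (not exhaustive):
 *   r1 += r2   (BPF_ALU64)           -> true for both src and dst
 *   w1 = 5     (BPF_ALU | BPF_MOV)   -> false, only the low 32 bits are defined
 *   r1 = *(u32 *)(r2 + 0)  (BPF_LDX | BPF_W)
 *                                    -> false for the dst def (32-bit load),
 *                                       true for the src r2 (a pointer)
 */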
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /* Return TRUE if INSN doesn't have an explicit value definition. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) static bool insn_no_def(struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) u8 class = BPF_CLASS(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) return (class == BPF_JMP || class == BPF_JMP32 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) class == BPF_STX || class == BPF_ST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) /* Return TRUE if INSN has defined any 32-bit value explicitly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (insn_no_def(insn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) static void mark_insn_zext(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) s32 def_idx = reg->subreg_def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (def_idx == DEF_NOT_SUBREG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) env->insn_aux_data[def_idx - 1].zext_dst = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) /* The dst will be zero extended, so won't be sub-register anymore. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) reg->subreg_def = DEF_NOT_SUBREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
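/* Hypothetical example: insn 4 "w2 = w1" is a 32-bit def, so check_reg_arg()
 * sets r2's subreg_def to 5 (insn_idx + 1). If insn 7 "r3 += r2" later reads
 * r2 as a full 64-bit source, mark_insn_zext() sets
 * insn_aux_data[4].zext_dst = true, allowing a later pass to insert an
 * explicit zero extension after insn 4 on JITs that need it.
 */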
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) enum reg_arg_type t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) struct bpf_func_state *state = vstate->frame[vstate->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) struct bpf_reg_state *reg, *regs = state->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) bool rw64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (regno >= MAX_BPF_REG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) verbose(env, "R%d is invalid\n", regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) reg = &regs[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) rw64 = is_reg64(env, insn, regno, reg, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (t == SRC_OP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) /* check whether register used as source operand can be read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (reg->type == NOT_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) verbose(env, "R%d !read_ok\n", regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) /* We don't need to worry about FP liveness because it's read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (regno == BPF_REG_FP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (rw64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) mark_insn_zext(env, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) return mark_reg_read(env, reg, reg->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) /* check whether register used as dest operand can be written to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (regno == BPF_REG_FP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) verbose(env, "frame pointer is read only\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) reg->live |= REG_LIVE_WRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (t == DST_OP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) mark_reg_unknown(env, regs, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /* for any branch, call, exit record the history of jmps in the given state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) static int push_jmp_history(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct bpf_verifier_state *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) u32 cnt = cur->jmp_history_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct bpf_idx_pair *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) p[cnt - 1].idx = env->insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) p[cnt - 1].prev_idx = env->prev_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) cur->jmp_history = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) cur->jmp_history_cnt = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /* Backtrack one insn at a time. If idx is not at the top of the recorded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * history then the previous instruction came from straight-line execution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) u32 *history)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) u32 cnt = *history;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (cnt && st->jmp_history[cnt - 1].idx == i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) i = st->jmp_history[cnt - 1].prev_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) (*history)--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
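/* For instance, suppose the history recorded (idx = 12, prev_idx = 5) because
 * insn 12 was reached via a jump processed at insn 5. When backtracking sits
 * at insn 12 with that entry on top of the history, it continues at insn 5
 * and pops the entry; without a matching entry it simply steps to insn 11.
 */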
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) /* For a given verifier state backtrack_insn() is called from the last insn to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * the first insn. Its purpose is to compute a bitmask of registers and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) * stack slots that need precision in the parent verifier state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) static int backtrack_insn(struct bpf_verifier_env *env, int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) u32 *reg_mask, u64 *stack_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) const struct bpf_insn_cbs cbs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) .cb_print = verbose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) .private_data = env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct bpf_insn *insn = env->prog->insnsi + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) u8 class = BPF_CLASS(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) u8 opcode = BPF_OP(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) u8 mode = BPF_MODE(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) u32 dreg = 1u << insn->dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) u32 sreg = 1u << insn->src_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) u32 spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (insn->code == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (env->log.level & BPF_LOG_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) verbose(env, "%d: ", idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (class == BPF_ALU || class == BPF_ALU64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (!(*reg_mask & dreg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (opcode == BPF_MOV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (BPF_SRC(insn->code) == BPF_X) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) /* dreg = sreg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * dreg needs precision after this insn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * sreg needs precision before this insn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) *reg_mask &= ~dreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) *reg_mask |= sreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) /* dreg = K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) * dreg needs precision after this insn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) * Corresponding register is already marked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * as precise=true in this verifier state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * No further markings in parent are necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) *reg_mask &= ~dreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) if (BPF_SRC(insn->code) == BPF_X) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) /* dreg += sreg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * both dreg and sreg need precision
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) * before this insn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) *reg_mask |= sreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) } /* else dreg += K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * dreg still needs precision before this insn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) } else if (class == BPF_LDX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (!(*reg_mask & dreg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) *reg_mask &= ~dreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) /* scalars can only be spilled into stack w/o losing precision.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) * Load from any other memory can be zero extended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) * The desire to keep that precision is already indicated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) * by 'precise' mark in corresponding register of this state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) * No further tracking necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (insn->src_reg != BPF_REG_FP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (BPF_SIZE(insn->code) != BPF_DW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) /* dreg = *(u64 *)[fp - off] was a fill from the stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * That [fp - off] slot contains a scalar that needs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * tracked with precision.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) spi = (-insn->off - 1) / BPF_REG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (spi >= 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) verbose(env, "BUG spi %d\n", spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) WARN_ONCE(1, "verifier backtracking bug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) *stack_mask |= 1ull << spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) } else if (class == BPF_STX || class == BPF_ST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (*reg_mask & dreg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) /* stx & st shouldn't be using _scalar_ dst_reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) * to access memory. It means backtracking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) * encountered a case of pointer subtraction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /* scalars can only be spilled into stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (insn->dst_reg != BPF_REG_FP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (BPF_SIZE(insn->code) != BPF_DW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) spi = (-insn->off - 1) / BPF_REG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (spi >= 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) verbose(env, "BUG spi %d\n", spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) WARN_ONCE(1, "verifier backtracking bug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (!(*stack_mask & (1ull << spi)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) *stack_mask &= ~(1ull << spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (class == BPF_STX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) *reg_mask |= sreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) } else if (class == BPF_JMP || class == BPF_JMP32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (opcode == BPF_CALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (insn->src_reg == BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /* regular helper call sets R0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) *reg_mask &= ~1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (*reg_mask & 0x3f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) /* if backtracking was looking for registers R1-R5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * they should have been found already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) verbose(env, "BUG regs %x\n", *reg_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) WARN_ONCE(1, "verifier backtracking bug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) } else if (opcode == BPF_EXIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) } else if (class == BPF_LD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (!(*reg_mask & dreg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) *reg_mask &= ~dreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) /* It's ld_imm64 or ld_abs or ld_ind.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * For ld_imm64 no further tracking of precision
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) * into parent is necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (mode == BPF_IND || mode == BPF_ABS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /* to be analyzed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
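/* A worked example of how the masks evolve, assuming a hypothetical sequence
 * where precision of r3 is requested after insn 3:
 *
 *   0: r1 = 8
 *   1: *(u64 *)(r10 - 8) = r1
 *   2: r2 = *(u64 *)(r10 - 8)
 *   3: r3 = r2
 *
 * insn 3 (mov between regs):  clear r3, set r2        -> reg_mask = {r2}
 * insn 2 (fill from fp-8, spi 0): clear r2, set bit 0 -> stack_mask = {fp-8}
 * insn 1 (spill of r1 to fp-8): clear bit 0, set r1   -> reg_mask = {r1}
 * insn 0 (r1 = K): clear r1                           -> both masks empty
 */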
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) /* the scalar precision tracking algorithm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) * . at the start all registers have precise=false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * . scalar ranges are tracked as normal through alu and jmp insns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * . once the precise value of a scalar register is used in:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * . ptr + scalar alu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * . if (scalar cond K|scalar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * . helper_call(.., scalar, ...) where ARG_CONST is expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) * backtrack through the verifier states and mark all registers and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * stack slots with spilled constants that these scalar registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * should be precise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) * . during state pruning two registers (or spilled stack slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) * are equivalent if both are not precise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) * Note the verifier cannot simply walk the register parentage chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * since many different registers and stack slots could have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) * used to compute a single precise scalar.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * The approach of starting with precise=true for all registers and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * backtracking to mark a register as not precise when the verifier detects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * that the program doesn't care about the specific value (e.g., when a helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) * takes a register as an ARG_ANYTHING parameter) is not safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) * It's ok to walk a single parentage chain of the verifier states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * It's possible that this backtracking will go all the way back to the 1st insn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) * All other branches will be explored for needing precision later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * The backtracking needs to deal with cases like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) * r9 -= r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) * r5 = r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) * if r5 > 0x79f goto pc+7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) * r5 += 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) * call bpf_perf_event_output#25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * and this case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * r6 = 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * call foo // uses callee's r6 inside to compute r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * r0 += r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * if r0 == 0 goto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * To track the above, reg_mask/stack_mask need to be independent for each frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * Also if parent's curframe > frame where backtracking started,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * the verifier needs to mark registers in both frames, otherwise callees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * may incorrectly prune callers. This is similar to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * For now backtracking falls back into conservative marking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) static void mark_all_scalars_precise(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) struct bpf_verifier_state *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) struct bpf_func_state *func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) struct bpf_reg_state *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) /* big hammer: mark all scalars precise in this path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * pop_stack may still get !precise scalars.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) for (; st; st = st->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) for (i = 0; i <= st->curframe; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) func = st->frame[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) for (j = 0; j < BPF_REG_FP; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) reg = &func->regs[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (reg->type != SCALAR_VALUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) reg->precise = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (func->stack[j].slot_type[0] != STACK_SPILL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) reg = &func->stack[j].spilled_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (reg->type != SCALAR_VALUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) reg->precise = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) int spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) struct bpf_verifier_state *st = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) int first_idx = st->first_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) int last_idx = env->insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) struct bpf_func_state *func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) struct bpf_reg_state *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) u32 reg_mask = regno >= 0 ? 1u << regno : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) bool skip_first = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) bool new_marks = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) if (!env->bpf_capable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) func = st->frame[st->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (regno >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) reg = &func->regs[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (reg->type != SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) WARN_ONCE(1, "backtracing misuse");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) if (!reg->precise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) new_marks = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) reg_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) reg->precise = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) while (spi >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (func->stack[spi].slot_type[0] != STACK_SPILL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) stack_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) reg = &func->stack[spi].spilled_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (reg->type != SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) stack_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (!reg->precise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) new_marks = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) stack_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) reg->precise = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (!new_marks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (!reg_mask && !stack_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) DECLARE_BITMAP(mask, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) u32 history = st->jmp_history_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if (env->log.level & BPF_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) for (i = last_idx;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (skip_first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) skip_first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) err = backtrack_insn(env, i, &reg_mask, &stack_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (err == -ENOTSUPP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) mark_all_scalars_precise(env, st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) } else if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (!reg_mask && !stack_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /* Found assignment(s) into tracked register in this state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * Since this state is already marked, just return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * Nothing to be tracked further in the parent state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (i == first_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) i = get_prev_insn_idx(st, i, &history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (i >= env->prog->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* This can happen if backtracking reached insn 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * and there are still reg_mask or stack_mask bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * left to backtrack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) * It means the backtracking missed the spot where a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) * particular register was initialized with a constant.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) verbose(env, "BUG backtracking idx %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) WARN_ONCE(1, "verifier backtracking bug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) st = st->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (!st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) new_marks = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) func = st->frame[st->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) bitmap_from_u64(mask, reg_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) for_each_set_bit(i, mask, 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) reg = &func->regs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (reg->type != SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) reg_mask &= ~(1u << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (!reg->precise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) new_marks = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) reg->precise = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) bitmap_from_u64(mask, stack_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) for_each_set_bit(i, mask, 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (i >= func->allocated_stack / BPF_REG_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /* the sequence of instructions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * 2: (bf) r3 = r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) * 3: (7b) *(u64 *)(r3 -8) = r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) * 4: (79) r4 = *(u64 *)(r10 -8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) * doesn't contain jmps. It's backtracked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) * as a single block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * During backtracking insn 3 is not recognized as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * stack access, so at the end of backtracking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * stack slot fp-8 is still marked in stack_mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * However the parent state may not have accessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) * fp-8 and it's "unallocated" stack space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) * In such a case, fall back to conservative marking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) mark_all_scalars_precise(env, st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) if (func->stack[i].slot_type[0] != STACK_SPILL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) stack_mask &= ~(1ull << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) reg = &func->stack[i].spilled_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (reg->type != SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) stack_mask &= ~(1ull << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) if (!reg->precise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) new_marks = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) reg->precise = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (env->log.level & BPF_LOG_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) print_verifier_state(env, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) verbose(env, "parent %s regs=%x stack=%llx marks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) new_marks ? "didn't have" : "already had",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) reg_mask, stack_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) if (!reg_mask && !stack_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (!new_marks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) last_idx = st->last_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) first_idx = st->first_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return __mark_chain_precision(env, regno, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) return __mark_chain_precision(env, -1, spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
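/* These wrappers are what the rest of the verifier typically calls: when a
 * scalar register is about to be used where its exact value matters (e.g. a
 * helper's ARG_CONST_SIZE argument, a scalar added to a pointer, a
 * conditional jump on a scalar), mark_chain_precision(env, regno) backtracks
 * and marks everything that contributed to that value as precise;
 * mark_chain_precision_stack() does the same for a spilled scalar identified
 * by its stack slot index.
 */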
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) static bool is_spillable_regtype(enum bpf_reg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) case PTR_TO_MAP_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) case PTR_TO_MAP_VALUE_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) case PTR_TO_STACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) case PTR_TO_CTX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) case PTR_TO_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) case PTR_TO_PACKET_META:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) case PTR_TO_PACKET_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) case PTR_TO_FLOW_KEYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) case CONST_PTR_TO_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) case PTR_TO_SOCKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) case PTR_TO_SOCKET_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) case PTR_TO_SOCK_COMMON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) case PTR_TO_SOCK_COMMON_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) case PTR_TO_TCP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) case PTR_TO_TCP_SOCK_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) case PTR_TO_XDP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) case PTR_TO_BTF_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) case PTR_TO_BTF_ID_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) case PTR_TO_RDONLY_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) case PTR_TO_RDONLY_BUF_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) case PTR_TO_RDWR_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) case PTR_TO_RDWR_BUF_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) case PTR_TO_PERCPU_BTF_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) case PTR_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) case PTR_TO_MEM_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) /* Does this register contain a constant zero? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) static bool register_is_null(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) static bool register_is_const(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) return tnum_is_unknown(reg->var_off) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) reg->umin_value == 0 && reg->umax_value == U64_MAX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) static bool register_is_bounded(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) static bool __is_pointer_value(bool allow_ptr_leaks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) const struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) if (allow_ptr_leaks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) return reg->type != SCALAR_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) static void save_register_state(struct bpf_func_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) int spi, struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) state->stack[spi].spilled_ptr = *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) for (i = 0; i < BPF_REG_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) state->stack[spi].slot_type[i] = STACK_SPILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
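/* For example, spilling a known scalar such as r0 = 5 into fp-8 copies r0's
 * full state (bounds and var_off included) into stack[spi].spilled_ptr, marks
 * the slot REG_LIVE_WRITTEN, and tags all BPF_REG_SIZE slot_type bytes as
 * STACK_SPILL so that a later fill can restore the same register state.
 */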
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) * stack boundary and alignment are checked in check_mem_access()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) /* stack frame we're writing to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) struct bpf_func_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) int off, int size, int value_regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) int insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) struct bpf_func_state *cur; /* state of the current function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) struct bpf_reg_state *reg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) state->acquired_refs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * so it's aligned access and [off, off + size) are within stack limits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (!env->allow_ptr_leaks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) state->stack[spi].slot_type[0] == STACK_SPILL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) size != BPF_REG_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) verbose(env, "attempt to corrupt spilled pointer on stack\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) cur = env->cur_state->frame[env->cur_state->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) reg = &cur->regs[value_regno];
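/* Spectre v4 (speculative store bypass) handling: unless the mitigation is
 * bypassed, flag this store for later sanitization when it spills a pointer
 * or touches a previously uninitialized (STACK_INVALID) stack byte.
 */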
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if (!env->bypass_spec_v4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) bool sanitize = reg && is_spillable_regtype(reg->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (state->stack[spi].slot_type[i] == STACK_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) sanitize = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (sanitize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) !register_is_null(reg) && env->bpf_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (dst_reg != BPF_REG_FP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) /* The backtracking logic can only recognize explicit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) * stack slot address like [fp - 8]. Other spills of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) * scalar via a different register have to be conservative.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) * Backtrack from here and mark all registers as precise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) * that contributed into 'reg' being a constant.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) err = mark_chain_precision(env, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) save_register_state(state, spi, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) } else if (reg && is_spillable_regtype(reg->type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) /* register containing pointer is being spilled into stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (size != BPF_REG_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) verbose_linfo(env, insn_idx, "; ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) verbose(env, "invalid size of register spill\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (state != cur && reg->type == PTR_TO_STACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) save_register_state(state, spi, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) u8 type = STACK_MISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) /* regular write of data into stack destroys any spilled ptr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) state->stack[spi].spilled_ptr.type = NOT_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) if (state->stack[spi].slot_type[0] == STACK_SPILL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) for (i = 0; i < BPF_REG_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) state->stack[spi].slot_type[i] = STACK_MISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) /* Only mark the slot as written if all 8 bytes were written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) * otherwise read propagation may incorrectly stop too soon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) * when stack slots are partially written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) * This heuristic makes read propagation conservative: it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) * adds reg_live_read marks to stack slots all the way back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) * to the first state when a program writes and reads less
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) * than 8 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) if (size == BPF_REG_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) /* when we zero initialize stack slots mark them as such */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) if (reg && register_is_null(reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) /* backtracking doesn't work for STACK_ZERO yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) err = mark_chain_precision(env, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) type = STACK_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) /* Mark slots affected by this stack write. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) for (i = 0; i < size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) * known to contain a variable offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) * This function checks whether the write is permitted and conservatively
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * tracks the effects of the write, considering that each stack slot in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) * dynamic range is potentially written to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) * 'off' includes 'regno->off'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) * 'value_regno' can be -1, meaning that an unknown value is being written to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) * the stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) * Spilled pointers in range are not marked as written because we don't know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) * what's going to be actually written. This means that read propagation for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * future reads cannot be terminated by this write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) * For privileged programs, uninitialized stack slots are considered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) * initialized by this write (even though we don't know exactly what offsets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) * are going to be written to). The idea is that we don't want the verifier to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) * reject future reads that access slots written to through variable offsets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) static int check_stack_write_var_off(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) /* func where register points to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) struct bpf_func_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) int ptr_regno, int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) int value_regno, int insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) struct bpf_func_state *cur; /* state of the current function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) int min_off, max_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) bool writing_zero = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) /* Set when the fact that we're writing a zero is used to let some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) * stack slots remain STACK_ZERO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) bool zero_used = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) cur = env->cur_state->frame[env->cur_state->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) ptr_reg = &cur->regs[ptr_regno];
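/* Compute the stack byte range [min_off, max_off) that this variable-offset
 * write may touch: the pointer's signed bounds plus the instruction's fixed
 * offset, with the access size added to the upper bound.
 */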
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) min_off = ptr_reg->smin_value + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) max_off = ptr_reg->smax_value + off + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) if (value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) value_reg = &cur->regs[value_regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (value_reg && register_is_null(value_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) writing_zero = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) err = realloc_func_state(state, round_up(-min_off, BPF_REG_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) state->acquired_refs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) /* Variable offset writes destroy any spilled pointers in range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) for (i = min_off; i < max_off; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) u8 new_type, *stype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) int slot, spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) slot = -i - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) spi = slot / BPF_REG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (!env->allow_ptr_leaks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) && *stype != NOT_INIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) && *stype != SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) /* Reject the write if there are spilled pointers in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * range. If we didn't reject here, the ptr status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) * would be erased below (even though not all slots are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * actually overwritten), possibly opening the door to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) * leaks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) insn_idx, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) /* Erase all spilled pointers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) state->stack[spi].spilled_ptr.type = NOT_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) /* Update the slot type. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) new_type = STACK_MISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (writing_zero && *stype == STACK_ZERO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) new_type = STACK_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) zero_used = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) /* If the slot is STACK_INVALID, we check whether it's OK to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) * pretend that it will be initialized by this write. The slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) * might not actually be written to, and so if we mark it as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) * initialized future reads might leak uninitialized memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) * For privileged programs, we will accept such reads to slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) * that may or may not be written because, if we rejected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * them, the error would be too confusing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) insn_idx, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) *stype = new_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (zero_used) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) /* backtracking doesn't work for STACK_ZERO yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) err = mark_chain_precision(env, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) /* When register 'dst_regno' is assigned some values from stack[min_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) * max_off), we set the register's type according to the types of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) * respective stack slots. If all the stack values are known to be zeros, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) * so is the destination reg. Otherwise, the register is considered to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) * SCALAR. This function does not deal with register filling; the caller must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) * ensure that all spilled registers in the stack range have been marked as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) * read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) static void mark_reg_stack_read(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) /* func where src register points to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) struct bpf_func_state *ptr_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) int min_off, int max_off, int dst_regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) struct bpf_func_state *state = vstate->frame[vstate->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) int i, slot, spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) u8 *stype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) int zeros = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
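/* Count how many bytes in the read range are known to be zero, stopping at
 * the first byte that is not STACK_ZERO.
 */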
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) for (i = min_off; i < max_off; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) slot = -i - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) spi = slot / BPF_REG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) stype = ptr_state->stack[spi].slot_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) zeros++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) if (zeros == max_off - min_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) /* any access_size read into register is zero extended,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) * so the whole register == const_zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) __mark_reg_const_zero(&state->regs[dst_regno]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) /* backtracking doesn't support STACK_ZERO yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) * so mark it precise here, so that later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) * backtracking can stop here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) * Backtracking may not need this if this register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) * doesn't participate in pointer adjustment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) * Forward propagation of precise flag is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) * necessary either. This mark is only to stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) * backtracking. Any register that contributed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) * to const 0 was marked precise before spill.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) state->regs[dst_regno].precise = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) /* have read misc data from the stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) mark_reg_unknown(env, state->regs, dst_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) /* Read the stack at 'off' and put the results into the register indicated by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) * 'dst_regno'. It handles reg filling if the addressed stack slot is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) * spilled reg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) * 'dst_regno' can be -1, meaning that the read value is not going to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) * register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) * The access is assumed to be within the current stack bounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) /* func where src register points to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) struct bpf_func_state *reg_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) int off, int size, int dst_regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) struct bpf_func_state *state = vstate->frame[vstate->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) struct bpf_reg_state *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) u8 *stype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) stype = reg_state->stack[spi].slot_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) reg = &reg_state->stack[spi].spilled_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) if (stype[0] == STACK_SPILL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) if (size != BPF_REG_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) if (reg->type != SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) verbose_linfo(env, env->insn_idx, "; ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) verbose(env, "invalid size of register fill\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) if (dst_regno >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) mark_reg_unknown(env, state->regs, dst_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) for (i = 1; i < BPF_REG_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) verbose(env, "corrupted spill memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) if (dst_regno >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) /* restore register state from stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) state->regs[dst_regno] = *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) /* mark reg as written since spilled pointer state likely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) * has its liveness marks cleared by is_state_visited()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) * which resets stack/reg liveness for state transitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) /* If dst_regno==-1, the caller is asking us whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) * it is acceptable to use this value as a SCALAR_VALUE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) * (e.g. for XADD).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) * We must not allow unprivileged callers to do that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) * with spilled pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) verbose(env, "leaking pointer from stack off %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) type = stype[(slot - i) % BPF_REG_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (type == STACK_MISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) if (type == STACK_ZERO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) verbose(env, "invalid read from stack off %d+%d size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) off, i, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (dst_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) enum stack_access_src {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) ACCESS_DIRECT = 1, /* the access is performed by an instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) ACCESS_HELPER = 2, /* the access is performed by a helper */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) static int check_stack_range_initialized(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) int regno, int off, int access_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) bool zero_size_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) enum stack_access_src type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) struct bpf_call_arg_meta *meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
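/* Shorthand for the current frame's register state of register 'regno'. */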
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) return cur_regs(env) + regno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) /* Read the stack at 'ptr_regno + off' and put the result into the register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) * 'dst_regno'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) * but not its variable offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) * As opposed to check_stack_read_fixed_off, this function doesn't deal with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * filling registers (i.e. reads of spilled register cannot be detected when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * the offset is not fixed). We conservatively mark 'dst_regno' as containing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) * offset; for a fixed offset check_stack_read_fixed_off should be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) * instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) static int check_stack_read_var_off(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) int ptr_regno, int off, int size, int dst_regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) /* The state of the source register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) struct bpf_reg_state *reg = reg_state(env, ptr_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) struct bpf_func_state *ptr_state = func(env, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) int min_off, max_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) /* Note that we pass a NULL meta, so raw access will not be permitted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) err = check_stack_range_initialized(env, ptr_regno, off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) false, ACCESS_DIRECT, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) min_off = reg->smin_value + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) max_off = reg->smax_value + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) /* check_stack_read dispatches to check_stack_read_fixed_off or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) * check_stack_read_var_off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) * The caller must ensure that the offset falls within the allocated stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) * bounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) * 'dst_regno' is a register which will receive the value from the stack. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) * can be -1, meaning that the read value is not going to a register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) static int check_stack_read(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) int ptr_regno, int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) int dst_regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) struct bpf_reg_state *reg = reg_state(env, ptr_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) struct bpf_func_state *state = func(env, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) /* Some accesses are only permitted with a static offset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) bool var_off = !tnum_is_const(reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) /* The offset is required to be static when reads don't go to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) * register, in order to not leak pointers (see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) * check_stack_read_fixed_off).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) if (dst_regno < 0 && var_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) tn_buf, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) /* Variable offset is prohibited for unprivileged mode for simplicity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) * since it requires corresponding support in Spectre masking for stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) * ALU. See also retrieve_ptr_limit().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) if (!env->bypass_spec_v1 && var_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) ptr_regno, tn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) if (!var_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) off += reg->var_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) err = check_stack_read_fixed_off(env, state, off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) dst_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) /* Variable offset stack reads need more conservative handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) * than fixed offset ones. Note that dst_regno >= 0 on this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) * branch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) err = check_stack_read_var_off(env, ptr_regno, off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) dst_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) /* check_stack_write dispatches to check_stack_write_fixed_off or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) * check_stack_write_var_off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) * 'ptr_regno' is the register used as a pointer into the stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) * 'value_regno' is the register whose value we're writing to the stack. It can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) * be -1, meaning that we're not writing from a register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) * The caller must ensure that the offset falls within the maximum stack size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) static int check_stack_write(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) int ptr_regno, int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) int value_regno, int insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) struct bpf_reg_state *reg = reg_state(env, ptr_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) struct bpf_func_state *state = func(env, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) if (tnum_is_const(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) off += reg->var_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) err = check_stack_write_fixed_off(env, state, off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) value_regno, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) /* Variable offset stack writes need more conservative handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) * than fixed offset ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) err = check_stack_write_var_off(env, state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) ptr_regno, off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) value_regno, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
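/* Check that the map's access capability (derived from its flags, e.g.
 * rdonly/wronly program access) permits this read or write of the value.
 */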
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) int off, int size, enum bpf_access_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) struct bpf_reg_state *regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) struct bpf_map *map = regs[regno].map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) u32 cap = bpf_map_flags_to_cap(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) map->value_size, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) map->value_size, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) static int __check_mem_access(struct bpf_verifier_env *env, int regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) int off, int size, u32 mem_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) bool zero_size_allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) struct bpf_reg_state *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) if (off >= 0 && size_ok && (u64)off + size <= mem_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) reg = &cur_regs(env)[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) switch (reg->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) case PTR_TO_MAP_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) mem_size, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) case PTR_TO_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) case PTR_TO_PACKET_META:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) case PTR_TO_PACKET_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) off, size, regno, reg->id, off, mem_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) case PTR_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) mem_size, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) /* check read/write into a memory region with possible variable offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) int off, int size, u32 mem_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) bool zero_size_allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) struct bpf_func_state *state = vstate->frame[vstate->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) struct bpf_reg_state *reg = &state->regs[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) /* We may have adjusted the register pointing to memory region, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) * need to try adding each of min_value and max_value to off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) * to make sure our theoretical access will be safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (env->log.level & BPF_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) print_verifier_state(env, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) /* The minimum value is only important with signed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) * comparisons where we can't assume the floor of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) * value is 0. If we are using signed variables for our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) * indexes we need to make sure that whatever we use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) * will have a set floor within our range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) if (reg->smin_value < 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) (reg->smin_value == S64_MIN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) reg->smin_value + off < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) err = __check_mem_access(env, regno, reg->smin_value + off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) mem_size, zero_size_allowed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) verbose(env, "R%d min value is outside of the allowed memory range\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) /* If we haven't set a max value then we need to bail since we can't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) * sure we won't do bad things.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) * If reg->umax_value + off could overflow, treat that as unbounded too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) if (reg->umax_value >= BPF_MAX_VAR_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) err = __check_mem_access(env, regno, reg->umax_value + off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) mem_size, zero_size_allowed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) verbose(env, "R%d max value is outside of the allowed memory range\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) /* check read/write into a map element with possible variable offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) static int check_map_access(struct bpf_verifier_env *env, u32 regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) int off, int size, bool zero_size_allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) struct bpf_func_state *state = vstate->frame[vstate->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) struct bpf_reg_state *reg = &state->regs[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) struct bpf_map *map = reg->map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) err = check_mem_region_access(env, regno, off, size, map->value_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) zero_size_allowed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (map_value_has_spin_lock(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) u32 lock = map->spin_lock_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) /* if any part of struct bpf_spin_lock can be touched by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) * load/store reject this program.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) * To check that [x1, x2) overlaps with [y1, y2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) * it is sufficient to check x1 < y2 && y1 < x2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) lock < reg->umax_value + off + size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) #define MAX_PACKET_OFF 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
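/* Resolve the effective program type: if this program attaches to another
 * BPF program (dst_prog is set), use the target program's type instead of
 * its own.
 */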
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
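/* Decide whether a program of this type may access packet data directly for
 * the given access type. Helper-initiated accesses (meta != NULL) are decided
 * by the helper's pkt_access flag instead.
 */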
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) const struct bpf_call_arg_meta *meta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) enum bpf_access_type t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) switch (prog_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) /* Program types only with direct read access go here! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) case BPF_PROG_TYPE_LWT_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) case BPF_PROG_TYPE_LWT_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) case BPF_PROG_TYPE_LWT_SEG6LOCAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) case BPF_PROG_TYPE_SK_REUSEPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) case BPF_PROG_TYPE_FLOW_DISSECTOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) case BPF_PROG_TYPE_CGROUP_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (t == BPF_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) /* Program types with direct read + write access go here! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) case BPF_PROG_TYPE_SCHED_CLS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) case BPF_PROG_TYPE_SCHED_ACT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) case BPF_PROG_TYPE_XDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) case BPF_PROG_TYPE_LWT_XMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) case BPF_PROG_TYPE_SK_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) case BPF_PROG_TYPE_SK_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) if (meta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) return meta->pkt_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) env->seen_direct_write = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) case BPF_PROG_TYPE_CGROUP_SOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) if (t == BPF_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) env->seen_direct_write = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
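/* Check a direct packet access of 'size' bytes at fixed offset 'off' against
 * the verified packet range (reg->range) and record the maximum packet offset
 * that may be touched in env->prog->aux->max_pkt_offset.
 */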
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) int size, bool zero_size_allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) struct bpf_reg_state *regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) struct bpf_reg_state *reg = &regs[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) /* We may have added a variable offset to the packet pointer; but any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) * reg->range we have comes after that. We are only checking the fixed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) * offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) /* We don't allow negative numbers, because we aren't tracking enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) * detail to prove they're safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) if (reg->smin_value < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) err = __check_mem_access(env, regno, off, size, reg->range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) zero_size_allowed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) verbose(env, "R%d offset is outside of the packet\n", regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) /* __check_mem_access has made sure "off + size - 1" is within u16.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) * otherwise find_good_pkt_pointers would have refused to set the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) * info and __check_mem_access would have rejected this pkt access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) env->prog->aux->max_pkt_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) max_t(u32, env->prog->aux->max_pkt_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) off + reg->umax_value + size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) enum bpf_access_type t, enum bpf_reg_type *reg_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) u32 *btf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) struct bpf_insn_access_aux info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) .reg_type = *reg_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) .log = &env->log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (env->ops->is_valid_access &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) env->ops->is_valid_access(off, size, t, env->prog, &info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) /* A non-zero info.ctx_field_size indicates that this field is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) * candidate for later verifier transformation to load the whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) * field and then apply a mask when accessed with a narrower
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) * access than actual ctx access size. A zero info.ctx_field_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) * will only allow for whole field access and rejects any other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) * type of narrower access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) *reg_type = info.reg_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) *btf_id = info.btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) /* remember the offset of last byte accessed in ctx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) if (env->prog->aux->max_ctx_offset < off + size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) env->prog->aux->max_ctx_offset = off + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
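/* Illustrative sketch of the narrow-load handling above (the offset and
* field are hypothetical, not taken from any particular program type):
* for a 4-byte ctx field at offset 0 whose is_valid_access() callback
* reports ctx_field_size = 4, a narrow program load such as
*
*   r2 = *(u8 *)(r1 + 0)
*
* is accepted here and later rewritten by convert_ctx_accesses() into a
* full 4-byte load plus shift/mask, so the program still observes only
* the byte it asked for.
*/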
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) if (size < 0 || off < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) (u64)off + size > sizeof(struct bpf_flow_keys)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) verbose(env, "invalid access to flow keys off=%d size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) u32 regno, int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) enum bpf_access_type t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) struct bpf_reg_state *regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) struct bpf_reg_state *reg = &regs[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) struct bpf_insn_access_aux info = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) bool valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) if (reg->smin_value < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) verbose(env, "R%d min value is negative, either use unsigned index or do an if (index >= 0) check.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) switch (reg->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) case PTR_TO_SOCK_COMMON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) valid = bpf_sock_common_is_valid_access(off, size, t, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) case PTR_TO_SOCKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) valid = bpf_sock_is_valid_access(off, size, t, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) case PTR_TO_TCP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) case PTR_TO_XDP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) if (valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) env->insn_aux_data[insn_idx].ctx_field_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) info.ctx_field_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) verbose(env, "R%d invalid %s access off=%d size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) regno, reg_type_str[reg->type], off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) const struct bpf_reg_state *reg = reg_state(env, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) return reg->type == PTR_TO_CTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) const struct bpf_reg_state *reg = reg_state(env, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) return type_is_sk_pointer(reg->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) const struct bpf_reg_state *reg = reg_state(env, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) return type_is_pkt_pointer(reg->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) const struct bpf_reg_state *reg = reg_state(env, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) return reg->type == PTR_TO_FLOW_KEYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) const struct bpf_reg_state *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) int off, int size, bool strict)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) struct tnum reg_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) int ip_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) /* Byte size accesses are always allowed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (!strict || size == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) /* For platforms that do not have a Kconfig enabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) * NET_IP_ALIGN is universally set to '2'. And on platforms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) * to this code only in strict mode where we want to emulate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) * the NET_IP_ALIGN==2 checking. Therefore use an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) * unconditional IP align value of '2'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) ip_align = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) if (!tnum_is_aligned(reg_off, size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) "misaligned packet access off %d+%s+%d+%d size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) ip_align, tn_buf, reg->off, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) }
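/* Worked example (illustrative offsets only): in strict mode a 2-byte
* load at packet offset 12 passes because 2 (ip_align) + 12 = 14 is
* 2-byte aligned, while a 4-byte load at packet offset 13 fails because
* 2 + 13 = 15 is not 4-byte aligned.  A variable offset passes only if
* every value permitted by reg->var_off keeps the sum aligned.
*/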
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) const struct bpf_reg_state *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) const char *pointer_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) int off, int size, bool strict)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) struct tnum reg_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) /* Byte size accesses are always allowed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) if (!strict || size == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) if (!tnum_is_aligned(reg_off, size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) pointer_desc, tn_buf, reg->off, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) static int check_ptr_alignment(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) const struct bpf_reg_state *reg, int off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) int size, bool strict_alignment_once)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) bool strict = env->strict_alignment || strict_alignment_once;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) const char *pointer_desc = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) switch (reg->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) case PTR_TO_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) case PTR_TO_PACKET_META:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) /* Special case, because of NET_IP_ALIGN. Given metadata sits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) * right in front, treat it the very same way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) return check_pkt_ptr_alignment(env, reg, off, size, strict);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) case PTR_TO_FLOW_KEYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) pointer_desc = "flow keys ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) case PTR_TO_MAP_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) pointer_desc = "value ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) case PTR_TO_CTX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) pointer_desc = "context ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) case PTR_TO_STACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) pointer_desc = "stack ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) /* The stack spill tracking logic in check_stack_write_fixed_off()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) * and check_stack_read_fixed_off() relies on stack accesses being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) * aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) strict = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) case PTR_TO_SOCKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) pointer_desc = "sock ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) case PTR_TO_SOCK_COMMON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) pointer_desc = "sock_common ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) case PTR_TO_TCP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) pointer_desc = "tcp_sock ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) case PTR_TO_XDP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) pointer_desc = "xdp_sock ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) strict);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) static int update_stack_depth(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) const struct bpf_func_state *func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) u16 stack = env->subprog_info[func->subprogno].stack_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) if (stack >= -off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) /* update known max for given subprogram */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) env->subprog_info[func->subprogno].stack_depth = -off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) /* starting from the main bpf function, walk all instructions of the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) * and recursively walk all callees that the given function can call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) * Ignore jump and exit insns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) * Since recursion is prevented by check_cfg() this algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) * only needs a local stack of MAX_CALL_FRAMES to remember callsites
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) static int check_max_stack_depth(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) struct bpf_subprog_info *subprog = env->subprog_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) struct bpf_insn *insn = env->prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) bool tail_call_reachable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) int ret_insn[MAX_CALL_FRAMES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) int ret_prog[MAX_CALL_FRAMES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) process_func:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) /* protect against potential stack overflow that might happen when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) * depth for such a case down to 256 so that the worst-case scenario
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) * would result in an 8k stack size (256 * 32, the tail call limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) * equals 8k).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) * To get an idea of what might happen, see this example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) * func1 -> sub rsp, 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) * subfunc1 -> sub rsp, 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) * tailcall1 -> add rsp, 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) * subfunc2 -> sub rsp, 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) * subfunc22 -> sub rsp, 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) * tailcall2 -> add rsp, 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) * A tailcall will unwind the current stack frame but it will not get rid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) * of the caller's stack as shown in the example above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) if (idx && subprog[idx].has_tail_call && depth >= 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) /* round up to 32-bytes, since this is granularity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) * of interpreter stack size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) if (depth > MAX_BPF_STACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) verbose(env, "combined stack size of %d calls is %d. Too large\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) frame + 1, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) continue_func:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) subprog_end = subprog[idx + 1].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) for (; i < subprog_end; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) if (insn[i].code != (BPF_JMP | BPF_CALL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (insn[i].src_reg != BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) /* remember insn and function to return to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) ret_insn[frame] = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) ret_prog[frame] = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) /* find the callee */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) i = i + insn[i].imm + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) idx = find_subprog(env, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) if (idx < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) if (subprog[idx].has_tail_call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) tail_call_reachable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) frame++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) if (frame >= MAX_CALL_FRAMES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) verbose(env, "the call stack of %d frames is too deep !\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) goto process_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) /* if a tail call was detected across bpf2bpf calls then mark each of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) * currently present subprog frames as tail call reachable subprogs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) * this info will be utilized by the JIT so that the tail call counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) * is preserved throughout bpf2bpf calls combined with tailcalls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) if (tail_call_reachable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) for (j = 0; j < frame; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) subprog[ret_prog[j]].tail_call_reachable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) if (subprog[0].tail_call_reachable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) env->prog->aux->tail_call_reachable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) /* end of for() loop means the last insn of the 'subprog'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) * was reached. Doesn't matter whether it was JA or EXIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) if (frame == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) frame--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) i = ret_insn[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) idx = ret_prog[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) goto continue_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) }
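/* Worked example (illustrative, made-up stack sizes): if the main prog
* uses 100 bytes and calls a subprog that uses 200 bytes, the accounted
* depth is round_up(100, 32) + round_up(200, 32) = 128 + 224 = 352,
* which fits within MAX_BPF_STACK (512).  Had the subprog needed 400
* bytes, the total 128 + 416 = 544 would exceed the limit and the
* program would be rejected with the "combined stack size" error above.
*/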
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) #ifndef CONFIG_BPF_JIT_ALWAYS_ON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) static int get_callee_stack_depth(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) const struct bpf_insn *insn, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) int start = idx + insn->imm + 1, subprog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) subprog = find_subprog(env, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) if (subprog < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) return env->subprog_info[subprog].stack_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) int check_ctx_reg(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) const struct bpf_reg_state *reg, int regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) /* Access to ctx or passing it to a helper is only allowed in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) * its original, unmodified form.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) if (reg->off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) regno, reg->off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) }
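/* Illustrative sketch (the offset is made up): the sequence below is
* rejected by check_ctx_reg() because r1 no longer points at the start
* of the context:
*
*   r1 += 8
*   r0 = *(u32 *)(r1 + 0)   <-- "dereference of modified ctx ptr" error
*
* while the single access r0 = *(u32 *)(r1 + 8) is accepted, since the
* constant offset is carried in the instruction itself.
*/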
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) static int __check_buffer_access(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) const char *buf_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) const struct bpf_reg_state *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) int regno, int off, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) if (off < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) "R%d invalid %s buffer access: off=%d, size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) regno, buf_info, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) "R%d invalid variable buffer offset: off=%d, var_off=%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) regno, off, tn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) static int check_tp_buffer_access(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) const struct bpf_reg_state *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) int regno, int off, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) if (off + size > env->prog->aux->max_tp_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) env->prog->aux->max_tp_access = off + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) static int check_buffer_access(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) const struct bpf_reg_state *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) int regno, int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) bool zero_size_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) const char *buf_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) u32 *max_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) err = __check_buffer_access(env, buf_info, reg, regno, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) if (off + size > *max_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) *max_access = off + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) /* BPF architecture zero-extends alu32 ops into 64-bit registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) static void zext_32_to_64(struct bpf_reg_state *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) reg->var_off = tnum_subreg(reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) __reg_assign_32_into_64(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) }
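/* Illustrative note (not from the original source): this is what makes
* an alu32 move such as "w0 = w1" leave r0 with known-zero upper bits:
* the 64-bit tnum is rebuilt from the 32-bit subreg and the 64-bit
* bounds are re-derived from the 32-bit bounds, so umax_value cannot
* exceed U32_MAX afterwards.
*/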
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) /* truncate register to smaller size (in bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) * must be called with size < BPF_REG_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) /* clear high bits in bit representation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) reg->var_off = tnum_cast(reg->var_off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) /* fix arithmetic bounds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) mask = ((u64)1 << (size * 8)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) reg->umin_value &= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) reg->umax_value &= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) reg->umin_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) reg->umax_value = mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) reg->smin_value = reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) reg->smax_value = reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) /* If size is smaller than 32bit register the 32bit register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) * values are also truncated so we push 64-bit bounds into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) * 32-bit bounds. Above were truncated < 32-bits already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) if (size >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) __reg_combine_64_into_32(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) }
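/* Worked example (illustrative values): for size == 2 the mask is
* 0xffff.  Bounds [0x10004, 0x10008] share the same bits above the mask,
* so they become [0x4, 0x8]; bounds [0xfffe, 0x10002] straddle a 16-bit
* boundary, so they are widened to the full [0, 0xffff] range.
*/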
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) static bool bpf_map_is_rdonly(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) /* A map is considered read-only if the following conditions are true:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) * 1) BPF program side cannot change any of the map content. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) * BPF_F_RDONLY_PROG flag was set at map creation time and stays set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) * throughout the map's lifetime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) * 2) The map value(s) have been initialized from user space by a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) * loader and then "frozen", such that no new map update/delete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) * operations from syscall side are possible for the rest of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) * the map's lifetime from that point onwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) * 3) Any parallel/pending map update/delete operations from syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) * side have been completed. Only after that point, it's safe to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) * assume that map value(s) are immutable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) return (map->map_flags & BPF_F_RDONLY_PROG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) READ_ONCE(map->frozen) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) !bpf_map_write_active(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) }
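/* Typical user-space sequence that satisfies all three conditions
* (sketch only, assuming libbpf; helper names may vary by version):
*
*   bpf_map__set_map_flags(map, BPF_F_RDONLY_PROG);   before load
*   ... populate via bpf_map_update_elem() ...
*   bpf_map_freeze(bpf_map__fd(map));                  no more syscall writes
*
* Once in-flight writers drain (bpf_map_write_active() returns false),
* reads from such a map can be tracked as known scalars.
*/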
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) err = map->ops->map_direct_value_addr(map, &addr, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) ptr = (void *)(long)addr + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) case sizeof(u8):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) *val = (u64)*(u8 *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) case sizeof(u16):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) *val = (u64)*(u16 *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) case sizeof(u32):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) *val = (u64)*(u32 *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) case sizeof(u64):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) *val = *(u64 *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) struct bpf_reg_state *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) int regno, int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) enum bpf_access_type atype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) int value_regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) struct bpf_reg_state *reg = regs + regno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) u32 btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) if (off < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) "R%d is ptr_%s invalid negative access: off=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) regno, tname, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) regno, tname, off, tn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) if (env->ops->btf_struct_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) ret = env->ops->btf_struct_access(&env->log, t, off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) atype, &btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) if (atype != BPF_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) verbose(env, "only read is supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) ret = btf_struct_access(&env->log, t, off, size, atype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) &btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) if (atype == BPF_READ && value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) }
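/* Illustrative sketch (made-up tracing program): with r1 holding a
* PTR_TO_BTF_ID to struct task_struct, direct dereferences such as
*
*   parent = task->real_parent;   BTF walk yields another PTR_TO_BTF_ID
*   pid    = task->pid;           BTF walk yields a SCALAR_VALUE
*
* are verified here: the btf_struct_access() walk checks that off/size
* land on a real member and mark_btf_ld_reg() gives the destination
* register the member's type.
*/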
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) static int check_ptr_to_map_access(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) struct bpf_reg_state *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) int regno, int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) enum bpf_access_type atype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) int value_regno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) struct bpf_reg_state *reg = regs + regno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) struct bpf_map *map = reg->map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) const char *tname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) u32 btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) if (!btf_vmlinux) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) verbose(env, "map_ptr access not supported for map type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) map->map_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) tname = btf_name_by_offset(btf_vmlinux, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) if (!env->allow_ptr_to_map_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) if (off < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) verbose(env, "R%d is %s invalid negative access: off=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) regno, tname, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) if (atype != BPF_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) verbose(env, "only read from %s is supported\n", tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) if (value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) /* Check that the stack access at the given offset is within bounds. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) * maximum valid offset is -1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) * The minimum valid offset is -MAX_BPF_STACK for writes, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) * -state->allocated_stack for reads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) static int check_stack_slot_within_bounds(int off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) struct bpf_func_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) enum bpf_access_type t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) int min_valid_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) if (t == BPF_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) min_valid_off = -MAX_BPF_STACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) min_valid_off = -state->allocated_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) if (off < min_valid_off || off > -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) }
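/* Worked example (illustrative offsets): with MAX_BPF_STACK == 512, a
* write at off == -8 is accepted, any access at off >= 0 is rejected
* (the maximum valid offset is -1), and a write at off == -520 is
* rejected for falling below -MAX_BPF_STACK.  Reads are further limited
* to stack that was actually allocated (-state->allocated_stack).
*/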
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) /* Check that the stack access at 'regno + off' falls within the maximum stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) * bounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) * 'off' includes the register's fixed offset (reg->off), but not its dynamic part (if any).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) static int check_stack_access_within_bounds(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) int regno, int off, int access_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) enum stack_access_src src, enum bpf_access_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) struct bpf_reg_state *regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) struct bpf_reg_state *reg = regs + regno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) struct bpf_func_state *state = func(env, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) int min_off, max_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) char *err_extra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) if (src == ACCESS_HELPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) /* We don't know if helpers are reading or writing (or both). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) err_extra = " indirect access to";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) else if (type == BPF_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) err_extra = " read from";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) err_extra = " write to";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) if (tnum_is_const(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) min_off = reg->var_off.value + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) if (access_size > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) max_off = min_off + access_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) max_off = min_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) if (reg->smax_value >= BPF_MAX_VAR_OFF ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) reg->smin_value <= -BPF_MAX_VAR_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) err_extra, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) min_off = reg->smin_value + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) if (access_size > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) max_off = reg->smax_value + off + access_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) max_off = min_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) err = check_stack_slot_within_bounds(min_off, state, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) err = check_stack_slot_within_bounds(max_off, state, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) if (tnum_is_const(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) verbose(env, "invalid%s stack R%d off=%d size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) err_extra, regno, off, access_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) err_extra, regno, tn_buf, access_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) /* check whether memory at (regno + off) is accessible for t = (read | write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) * if t==write, value_regno is a register whose value is stored into memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) * if t==read, value_regno is a register which will receive the value from memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) * if t==write && value_regno==-1, some unknown value is stored into memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) * if t==read && value_regno==-1, don't care what we read from memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) int off, int bpf_size, enum bpf_access_type t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) int value_regno, bool strict_alignment_once)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) struct bpf_reg_state *regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) struct bpf_reg_state *reg = regs + regno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) struct bpf_func_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) int size, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) size = bpf_size_to_bytes(bpf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) if (size < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) /* alignment checks will add in reg->off themselves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) /* for access checks, reg->off is just part of off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) off += reg->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) if (reg->type == PTR_TO_MAP_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) if (t == BPF_WRITE && value_regno >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) is_pointer_value(env, value_regno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) verbose(env, "R%d leaks addr into map\n", value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) err = check_map_access_type(env, regno, off, size, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) err = check_map_access(env, regno, off, size, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) if (!err && t == BPF_READ && value_regno >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) struct bpf_map *map = reg->map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) /* if map is read-only, track its contents as scalars */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) if (tnum_is_const(reg->var_off) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) bpf_map_is_rdonly(map) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) map->ops->map_direct_value_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) int map_off = off + reg->var_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) u64 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) err = bpf_map_direct_read(map, map_off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) regs[value_regno].type = SCALAR_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) __mark_reg_known(&regs[value_regno], val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) mark_reg_unknown(env, regs, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) } else if (reg->type == PTR_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) if (t == BPF_WRITE && value_regno >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) is_pointer_value(env, value_regno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) verbose(env, "R%d leaks addr into mem\n", value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) err = check_mem_region_access(env, regno, off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) reg->mem_size, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) if (!err && t == BPF_READ && value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) mark_reg_unknown(env, regs, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) } else if (reg->type == PTR_TO_CTX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) enum bpf_reg_type reg_type = SCALAR_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) u32 btf_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) if (t == BPF_WRITE && value_regno >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) is_pointer_value(env, value_regno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) verbose(env, "R%d leaks addr into ctx\n", value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) err = check_ctx_reg(env, reg, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) verbose_linfo(env, insn_idx, "; ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) if (!err && t == BPF_READ && value_regno >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) /* ctx access returns either a scalar, or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * PTR_TO_PACKET[_META,_END]. In the latter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) * case, we know the offset is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) if (reg_type == SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) mark_reg_unknown(env, regs, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) mark_reg_known_zero(env, regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) if (reg_type_may_be_null(reg_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) regs[value_regno].id = ++env->id_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) /* A load of a ctx field could have an actual load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) * size different from the one encoded in the insn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) * When the dst is a PTR, it is certainly not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) * a sub-register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) regs[value_regno].subreg_def = DEF_NOT_SUBREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) if (reg_type == PTR_TO_BTF_ID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) reg_type == PTR_TO_BTF_ID_OR_NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) regs[value_regno].btf_id = btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) regs[value_regno].type = reg_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) } else if (reg->type == PTR_TO_STACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) /* Basic bounds checks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) state = func(env, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) err = update_stack_depth(env, state, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) if (t == BPF_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) err = check_stack_read(env, regno, off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) err = check_stack_write(env, regno, off, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) value_regno, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) } else if (reg_is_pkt_pointer(reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) verbose(env, "cannot write into packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) if (t == BPF_WRITE && value_regno >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) is_pointer_value(env, value_regno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) verbose(env, "R%d leaks addr into packet\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) err = check_packet_access(env, regno, off, size, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) if (!err && t == BPF_READ && value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) mark_reg_unknown(env, regs, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) } else if (reg->type == PTR_TO_FLOW_KEYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) if (t == BPF_WRITE && value_regno >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) is_pointer_value(env, value_regno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) verbose(env, "R%d leaks addr into flow keys\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) err = check_flow_keys_access(env, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) if (!err && t == BPF_READ && value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) mark_reg_unknown(env, regs, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) } else if (type_is_sk_pointer(reg->type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) if (t == BPF_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) verbose(env, "R%d cannot write into %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) regno, reg_type_str[reg->type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) err = check_sock_access(env, insn_idx, regno, off, size, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) if (!err && value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) mark_reg_unknown(env, regs, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) } else if (reg->type == PTR_TO_TP_BUFFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) err = check_tp_buffer_access(env, reg, regno, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) if (!err && t == BPF_READ && value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) mark_reg_unknown(env, regs, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) } else if (reg->type == PTR_TO_BTF_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) } else if (reg->type == CONST_PTR_TO_MAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) err = check_ptr_to_map_access(env, regs, regno, off, size, t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) } else if (reg->type == PTR_TO_RDONLY_BUF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) if (t == BPF_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) verbose(env, "R%d cannot write into %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) regno, reg_type_str[reg->type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) err = check_buffer_access(env, reg, regno, off, size, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) "rdonly",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) &env->prog->aux->max_rdonly_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) if (!err && value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) mark_reg_unknown(env, regs, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) } else if (reg->type == PTR_TO_RDWR_BUF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) err = check_buffer_access(env, reg, regno, off, size, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) "rdwr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) &env->prog->aux->max_rdwr_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) if (!err && t == BPF_READ && value_regno >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) mark_reg_unknown(env, regs, value_regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) verbose(env, "R%d invalid mem access '%s'\n", regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) reg_type_str[reg->type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) regs[value_regno].type == SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) /* b/h/w load zero-extends, mark upper bits as known 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) coerce_reg_to_size(&regs[value_regno], size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) }
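
/* Illustrative example for check_mem_access() above (a hypothetical BPF
 * instruction, not verifier code): a sub-register-sized load such as
 *
 *	r1 = *(u8 *)(r2 + 0)
 *
 * zero-extends into the full 64-bit register, so after the load the
 * verifier can treat r1 as a scalar in the range [0, 255]. The bounds
 * truncation itself is done by coerce_reg_to_size() in the code above.
 */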
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) insn->imm != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) verbose(env, "BPF_XADD uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) /* check src1 operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) err = check_reg_arg(env, insn->src_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) /* check src2 operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) err = check_reg_arg(env, insn->dst_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) if (is_pointer_value(env, insn->src_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) if (is_ctx_reg(env, insn->dst_reg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) is_pkt_reg(env, insn->dst_reg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) is_flow_key_reg(env, insn->dst_reg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) is_sk_reg(env, insn->dst_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) reg_type_str[reg_state(env, insn->dst_reg)->type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) /* check whether atomic_add can read the memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) BPF_SIZE(insn->code), BPF_READ, -1, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) /* check whether atomic_add can write into the same memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) BPF_SIZE(insn->code), BPF_WRITE, -1, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) }
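
/* Illustrative example for check_xadd() above (hypothetical BPF C snippet,
 * not verifier code): a BPF_XADD instruction typically originates from an
 * atomic add in a BPF C program, e.g.
 *
 *	struct val *v = bpf_map_lookup_elem(&counters, &key);
 *	if (v)
 *		__sync_fetch_and_add(&v->packets, 1);
 *
 * which LLVM compiles to a BPF_STX | BPF_XADD instruction of word or
 * double-word size ('counters', 'struct val' and 'packets' are assumed
 * names). The checks above reject XADD on ctx, packet, flow-key and socket
 * pointers and require the target memory to be both readable and writable.
 */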
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) /* When register 'regno' is used to read the stack (either directly or through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) * a helper function) make sure that it's within the stack boundary and, depending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) * on the access type, that all elements of the stack are initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) * 'off' includes 'regno->off', but not its dynamic part (if any).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) * All registers that have been spilled on the stack in the slots within the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) * read offsets are marked as read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) static int check_stack_range_initialized(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) struct bpf_verifier_env *env, int regno, int off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) int access_size, bool zero_size_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) enum stack_access_src type, struct bpf_call_arg_meta *meta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) struct bpf_reg_state *reg = reg_state(env, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) struct bpf_func_state *state = func(env, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) int err, min_off, max_off, i, j, slot, spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) enum bpf_access_type bounds_check_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) /* Some accesses can write anything into the stack, others are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) * read-only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) bool clobber = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) if (access_size == 0 && !zero_size_allowed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) verbose(env, "invalid zero-sized read\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) if (type == ACCESS_HELPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) /* The bounds checks for writes are more permissive than for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) * reads. However, if raw_mode is not set, we'll do extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) * checks below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) bounds_check_type = BPF_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) clobber = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) bounds_check_type = BPF_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) err = check_stack_access_within_bounds(env, regno, off, access_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) type, bounds_check_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) if (tnum_is_const(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) min_off = max_off = reg->var_off.value + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) /* Variable offset is prohibited for unprivileged mode for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) * simplicity since it requires corresponding support in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) * Spectre masking for stack ALU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) * See also retrieve_ptr_limit().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) if (!env->bypass_spec_v1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) regno, err_extra, tn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) /* Only an initialized buffer on the stack may be accessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) * with a variable offset. With an uninitialized buffer it's hard to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) * guarantee that the whole memory is marked as initialized on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) * helper return, since the specific bounds are unknown, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) * may cause uninitialized stack data to leak.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) if (meta && meta->raw_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) meta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) min_off = reg->smin_value + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) max_off = reg->smax_value + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) if (meta && meta->raw_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) meta->access_size = access_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) meta->regno = regno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) for (i = min_off; i < max_off + access_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) u8 *stype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) slot = -i - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) spi = slot / BPF_REG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) if (state->allocated_stack <= slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) if (*stype == STACK_MISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) goto mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) if (*stype == STACK_ZERO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) if (clobber) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) /* helper can write anything into the stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) *stype = STACK_MISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) goto mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) if (state->stack[spi].slot_type[0] == STACK_SPILL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) goto mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) if (state->stack[spi].slot_type[0] == STACK_SPILL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) env->allow_ptr_leaks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) if (clobber) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) for (j = 0; j < BPF_REG_SIZE; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) state->stack[spi].slot_type[j] = STACK_MISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) goto mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) if (tnum_is_const(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) err_extra, regno, min_off, i - min_off, access_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) err_extra, regno, tn_buf, i - min_off, access_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) mark:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) /* reading any byte out of 8-byte 'spill_slot' will cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) * the whole slot to be marked as 'read'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) mark_reg_read(env, &state->stack[spi].spilled_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) state->stack[spi].spilled_ptr.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) REG_LIVE_READ64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) return update_stack_depth(env, state, min_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) }
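
/* Illustrative example for check_stack_range_initialized() above
 * (hypothetical BPF C snippet, not verifier code): a stack buffer passed to
 * a helper must be fully initialized unless the argument is in raw_mode
 * (ARG_PTR_TO_UNINIT_*), e.g.
 *
 *	u32 key;				// uninitialized stack slot
 *	bpf_map_lookup_elem(&my_map, &key);	// rejected: invalid indirect
 *						// read from stack
 *	u32 key2 = 0;
 *	bpf_map_lookup_elem(&my_map, &key2);	// accepted
 *
 * 'my_map' is an assumed map definition and the exact verifier message may
 * differ between kernel versions.
 */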
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) int access_size, bool zero_size_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) struct bpf_call_arg_meta *meta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) switch (reg->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) case PTR_TO_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) case PTR_TO_PACKET_META:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) return check_packet_access(env, regno, reg->off, access_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) zero_size_allowed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) case PTR_TO_MAP_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) if (check_map_access_type(env, regno, reg->off, access_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) meta && meta->raw_mode ? BPF_WRITE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) BPF_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) return check_map_access(env, regno, reg->off, access_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) zero_size_allowed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) case PTR_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) return check_mem_region_access(env, regno, reg->off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) access_size, reg->mem_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) zero_size_allowed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) case PTR_TO_RDONLY_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) if (meta && meta->raw_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) return check_buffer_access(env, reg, regno, reg->off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) access_size, zero_size_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) "rdonly",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) &env->prog->aux->max_rdonly_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) case PTR_TO_RDWR_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) return check_buffer_access(env, reg, regno, reg->off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) access_size, zero_size_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) "rdwr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) &env->prog->aux->max_rdwr_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) case PTR_TO_STACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) return check_stack_range_initialized(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) regno, reg->off, access_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) zero_size_allowed, ACCESS_HELPER, meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) default: /* scalar_value or invalid ptr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) /* Allow zero-byte read from NULL, regardless of pointer type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) if (zero_size_allowed && access_size == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) register_is_null(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) verbose(env, "R%d type=%s expected=%s\n", regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) reg_type_str[reg->type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) reg_type_str[PTR_TO_STACK]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) }
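
/* Illustrative note for check_helper_mem_access() above (hypothetical BPF C
 * snippet): the default branch permits a zero-sized access through a NULL
 * pointer, which is what allows calls such as
 *
 *	bpf_csum_diff(NULL, 0, to, to_size, seed);
 *
 * where the "from" buffer is ARG_PTR_TO_MEM_OR_NULL paired with
 * ARG_CONST_SIZE_OR_ZERO ('to', 'to_size' and 'seed' are assumed local
 * variables).
 */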
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) /* Implementation details:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) * Two bpf_map_lookups (even with the same key) will have different reg->id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) * value_or_null->value transition, since the verifier only cares about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) * the range of access to valid map value pointer and doesn't care about actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) * address of the map element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) * reg->id > 0 after value_or_null->value transition. By doing so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) * two bpf_map_lookups will be considered two different pointers that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) * point to different bpf_spin_locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) * The verifier allows taking only one bpf_spin_lock at a time to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) * deadlocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) * Since only one bpf_spin_lock is allowed, the checks are simpler than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) * the reg_is_refcounted() logic. The verifier needs to remember only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) * one spin_lock instead of an array of acquired_refs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) * cur_state->active_spin_lock remembers which map value element got locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) * and clears it after bpf_spin_unlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) static int process_spin_lock(struct bpf_verifier_env *env, int regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) bool is_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) struct bpf_verifier_state *cur = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) bool is_const = tnum_is_const(reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) struct bpf_map *map = reg->map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) u64 val = reg->var_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) if (!is_const) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) if (!map->btf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) "map '%s' has to have BTF in order to use bpf_spin_lock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) if (!map_value_has_spin_lock(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) if (map->spin_lock_off == -E2BIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) "map '%s' has more than one 'struct bpf_spin_lock'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) else if (map->spin_lock_off == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) "map '%s' doesn't have 'struct bpf_spin_lock'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) if (map->spin_lock_off != val + reg->off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) val + reg->off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) if (is_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) if (cur->active_spin_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) "Locking two bpf_spin_locks is not allowed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) cur->active_spin_lock = reg->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) if (!cur->active_spin_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) verbose(env, "bpf_spin_unlock without taking a lock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) if (cur->active_spin_lock != reg->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) verbose(env, "bpf_spin_unlock of different lock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) cur->active_spin_lock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) }
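
/* Illustrative example for process_spin_lock() above (hypothetical BPF C
 * snippet, not verifier code): a map value embedding a bpf_spin_lock is
 * typically used as
 *
 *	struct hash_elem {
 *		struct bpf_spin_lock lock;
 *		int cnt;
 *	};
 *
 *	struct hash_elem *val = bpf_map_lookup_elem(&hash_map, &key);
 *	if (val) {
 *		bpf_spin_lock(&val->lock);
 *		val->cnt++;
 *		bpf_spin_unlock(&val->lock);
 *	}
 *
 * The lock offset must be a constant known to the verifier, the map needs
 * BTF so the bpf_spin_lock field can be located, and only one lock may be
 * held at a time ('hash_map' and the field names are assumptions).
 */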
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) return type == ARG_PTR_TO_MEM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) type == ARG_PTR_TO_MEM_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) type == ARG_PTR_TO_UNINIT_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) static bool arg_type_is_mem_size(enum bpf_arg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) return type == ARG_CONST_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) type == ARG_CONST_SIZE_OR_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) static bool arg_type_is_alloc_size(enum bpf_arg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) static bool arg_type_is_int_ptr(enum bpf_arg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) return type == ARG_PTR_TO_INT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) type == ARG_PTR_TO_LONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) static int int_ptr_type_to_size(enum bpf_arg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) if (type == ARG_PTR_TO_INT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) return sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) else if (type == ARG_PTR_TO_LONG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) return sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) static int resolve_map_arg_type(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) const struct bpf_call_arg_meta *meta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) enum bpf_arg_type *arg_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) if (!meta->map_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) /* kernel subsystem misconfigured verifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) verbose(env, "invalid map_ptr to access map->type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) switch (meta->map_ptr->map_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) case BPF_MAP_TYPE_SOCKMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) case BPF_MAP_TYPE_SOCKHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) verbose(env, "invalid arg_type for sockmap/sockhash\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) }
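
/* Illustrative note for resolve_map_arg_type() above: for sockmap/sockhash
 * the "value" passed to e.g. bpf_map_update_elem() from a BPF program is a
 * socket pointer rather than ordinary map memory, so the argument type is
 * rewritten to ARG_PTR_TO_BTF_ID_SOCK_COMMON. A hypothetical sketch:
 *
 *	// in a program that already holds a struct bpf_sock * 'sk'
 *	bpf_map_update_elem(&sock_map, &key, sk, BPF_NOEXIST);
 *
 * 'sock_map' and 'key' are assumptions made for the example.
 */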
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) struct bpf_reg_types {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) const enum bpf_reg_type types[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) u32 *btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) static const struct bpf_reg_types map_key_value_types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) .types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) PTR_TO_STACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) PTR_TO_PACKET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) PTR_TO_PACKET_META,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) PTR_TO_MAP_VALUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) static const struct bpf_reg_types sock_types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) .types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) PTR_TO_SOCK_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) PTR_TO_SOCKET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) PTR_TO_TCP_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) PTR_TO_XDP_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) #ifdef CONFIG_NET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) static const struct bpf_reg_types btf_id_sock_common_types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) .types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) PTR_TO_SOCK_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) PTR_TO_SOCKET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) PTR_TO_TCP_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) PTR_TO_XDP_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) PTR_TO_BTF_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) static const struct bpf_reg_types mem_types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) .types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) PTR_TO_STACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) PTR_TO_PACKET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) PTR_TO_PACKET_META,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) PTR_TO_MAP_VALUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) PTR_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) PTR_TO_RDONLY_BUF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) PTR_TO_RDWR_BUF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) static const struct bpf_reg_types int_ptr_types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) .types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) PTR_TO_STACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) PTR_TO_PACKET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) PTR_TO_PACKET_META,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) PTR_TO_MAP_VALUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_PERCPU_BTF_ID } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) [ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) [ARG_CONST_SIZE] = &scalar_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) [ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) [ARG_CONST_MAP_PTR] = &const_map_ptr_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) [ARG_PTR_TO_CTX] = &context_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) [ARG_PTR_TO_CTX_OR_NULL] = &context_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) #ifdef CONFIG_NET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) [ARG_PTR_TO_SOCKET] = &fullsock_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) [ARG_PTR_TO_MEM] = &mem_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) [ARG_PTR_TO_MEM_OR_NULL] = &mem_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) [ARG_PTR_TO_UNINIT_MEM] = &mem_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) [ARG_PTR_TO_INT] = &int_ptr_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) [ARG_PTR_TO_LONG] = &int_ptr_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) enum bpf_arg_type arg_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) const u32 *arg_btf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) enum bpf_reg_type expected, type = reg->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) const struct bpf_reg_types *compatible;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) compatible = compatible_reg_types[arg_type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) if (!compatible) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) expected = compatible->types[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) if (expected == NOT_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) if (type == expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) for (j = 0; j + 1 < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) if (type == PTR_TO_BTF_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) if (!arg_btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) if (!compatible->btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) arg_btf_id = compatible->btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) *arg_btf_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) verbose(env, "R%d is of type %s but %s is expected\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) regno, kernel_type_name(reg->btf_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) kernel_type_name(*arg_btf_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) }
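
/* Illustrative note for check_reg_type() above: the compatible_reg_types
 * table maps each helper argument type to the register types a program may
 * pass. For example, ARG_PTR_TO_MEM (see mem_types) accepts PTR_TO_STACK,
 * PTR_TO_PACKET[_META], PTR_TO_MAP_VALUE, PTR_TO_MEM and the rdonly/rdwr
 * buffers, so a hypothetical call such as
 *
 *	char buf[64] = {};
 *	bpf_ringbuf_output(&rb, buf, sizeof(buf), 0);
 *
 * passes a PTR_TO_STACK where ARG_PTR_TO_MEM is expected ('rb' is an
 * assumed ring buffer map).
 */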
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) struct bpf_call_arg_meta *meta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) const struct bpf_func_proto *fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) u32 regno = BPF_REG_1 + arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) enum bpf_arg_type arg_type = fn->arg_type[arg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) enum bpf_reg_type type = reg->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) if (arg_type == ARG_DONTCARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) err = check_reg_arg(env, regno, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) if (arg_type == ARG_ANYTHING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) if (is_pointer_value(env, regno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) verbose(env, "R%d leaks addr into helper function\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) if (type_is_pkt_pointer(type) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) !may_access_direct_pkt_data(env, meta, BPF_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) verbose(env, "helper access to the packet is not allowed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) if (arg_type == ARG_PTR_TO_MAP_VALUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) err = resolve_map_arg_type(env, meta, &arg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) if (register_is_null(reg) && arg_type_may_be_null(arg_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) /* A NULL register has a SCALAR_VALUE type, so skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) * type checking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) goto skip_type_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) if (type == PTR_TO_CTX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) err = check_ctx_reg(env, reg, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) skip_type_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) if (reg->ref_obj_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) if (meta->ref_obj_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) regno, reg->ref_obj_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) meta->ref_obj_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) meta->ref_obj_id = reg->ref_obj_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) if (arg_type == ARG_CONST_MAP_PTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) meta->map_ptr = reg->map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) /* bpf_map_xxx(..., map_ptr, ..., key) call:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) * check that [key, key + map->key_size) are within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) * stack limits and initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) if (!meta->map_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) /* In the function declaration map_ptr must come before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) * map_key, so that it's verified and known before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) * we have to check map_key here. Otherwise it means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) * that the kernel subsystem misconfigured the verifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) verbose(env, "invalid map_ptr to access map->key\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) err = check_helper_mem_access(env, regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) meta->map_ptr->key_size, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) !register_is_null(reg)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) /* bpf_map_xxx(..., map_ptr, ..., value) call:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) * check [value, value + map->value_size) validity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) if (!meta->map_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) /* kernel subsystem misconfigured verifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) verbose(env, "invalid map_ptr to access map->value\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) err = check_helper_mem_access(env, regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) meta->map_ptr->value_size, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) } else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) if (!reg->btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) verbose(env, "Helper has invalid btf_id in R%d\n", regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) meta->ret_btf_id = reg->btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) if (meta->func_id == BPF_FUNC_spin_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) if (process_spin_lock(env, regno, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) } else if (meta->func_id == BPF_FUNC_spin_unlock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) if (process_spin_lock(env, regno, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) verbose(env, "verifier internal error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) } else if (arg_type_is_mem_ptr(arg_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) /* The access to this pointer is only checked when we hit the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) * next is_mem_size argument below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) } else if (arg_type_is_mem_size(arg_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) /* This is used to refine the r0 return value bounds for helpers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) * that enforce this value as an upper bound on their return values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) * See do_refine_retval_range() for the helpers that can refine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) * the return value. The C type of the size argument is u32, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) * pull the register bound from umax_value; if it can be negative,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) * the verifier errors out below. Only upper bounds can be learned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) * because the retval is an int type and negative retvals are allowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) meta->msize_max_value = reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) /* The register is SCALAR_VALUE; the access check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) * happens using its boundaries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) if (!tnum_is_const(reg->var_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) /* For unprivileged variable accesses, disable raw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) * mode so that the program is required to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) * initialize all the memory that the helper could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) * just partially fill up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) meta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) if (reg->smin_value < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) if (reg->umin_value == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) err = check_helper_mem_access(env, regno - 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) zero_size_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) err = check_helper_mem_access(env, regno - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) reg->umax_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) zero_size_allowed, meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) err = mark_chain_precision(env, regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) } else if (arg_type_is_alloc_size(arg_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) if (!tnum_is_const(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) verbose(env, "R%d unbounded size, use 'var &= const' or 'if (var < const)'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) regno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) meta->mem_size = reg->var_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) } else if (arg_type_is_int_ptr(arg_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) int size = int_ptr_type_to_size(arg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) err = check_helper_mem_access(env, regno, size, false, meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) err = check_ptr_alignment(env, reg, 0, size, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) }
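/* Illustrative sketch (not part of the verifier): from the BPF program
 * side, the ARG_PTR_TO_MAP_KEY/ARG_PTR_TO_MAP_VALUE handling above
 * means a stack-allocated key must be fully initialized before it is
 * passed to a map helper, e.g. roughly:
 *
 *	__u32 key = 0;                               // initialized -> ok
 *	long *val = bpf_map_lookup_elem(&my_map, &key);
 *	if (val)
 *		*val += 1;
 *
 * Passing a pointer to uninitialized stack memory as the key is
 * rejected by check_helper_mem_access(). "my_map" is a hypothetical
 * array map used only for this example.
 */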
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) enum bpf_attach_type eatype = env->prog->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) enum bpf_prog_type type = resolve_prog_type(env->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) if (func_id != BPF_FUNC_map_update_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) /* It's not possible to get access to a locked struct sock in these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) * contexts, so updating is safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) case BPF_PROG_TYPE_TRACING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) if (eatype == BPF_TRACE_ITER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) case BPF_PROG_TYPE_SOCKET_FILTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) case BPF_PROG_TYPE_SCHED_CLS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) case BPF_PROG_TYPE_SCHED_ACT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) case BPF_PROG_TYPE_XDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) case BPF_PROG_TYPE_SK_REUSEPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) case BPF_PROG_TYPE_FLOW_DISSECTOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) case BPF_PROG_TYPE_SK_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) verbose(env, "cannot update sockmap in this context\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) static int check_map_func_compatibility(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) struct bpf_map *map, int func_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) if (!map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) /* We need a two way check, first is from map perspective ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) switch (map->map_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) case BPF_MAP_TYPE_PROG_ARRAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) if (func_id != BPF_FUNC_tail_call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) if (func_id != BPF_FUNC_perf_event_read &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) func_id != BPF_FUNC_perf_event_output &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) func_id != BPF_FUNC_skb_output &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) func_id != BPF_FUNC_perf_event_read_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) func_id != BPF_FUNC_xdp_output)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) case BPF_MAP_TYPE_RINGBUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) if (func_id != BPF_FUNC_ringbuf_output &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) func_id != BPF_FUNC_ringbuf_reserve &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) func_id != BPF_FUNC_ringbuf_query)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) case BPF_MAP_TYPE_STACK_TRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) if (func_id != BPF_FUNC_get_stackid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) case BPF_MAP_TYPE_CGROUP_ARRAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) if (func_id != BPF_FUNC_skb_under_cgroup &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) func_id != BPF_FUNC_current_task_under_cgroup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) case BPF_MAP_TYPE_CGROUP_STORAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) if (func_id != BPF_FUNC_get_local_storage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) case BPF_MAP_TYPE_DEVMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) case BPF_MAP_TYPE_DEVMAP_HASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) if (func_id != BPF_FUNC_redirect_map &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) func_id != BPF_FUNC_map_lookup_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) /* Restrict bpf side of cpumap and xskmap, open when use-cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) * appear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) case BPF_MAP_TYPE_CPUMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) if (func_id != BPF_FUNC_redirect_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) case BPF_MAP_TYPE_XSKMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) if (func_id != BPF_FUNC_redirect_map &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) func_id != BPF_FUNC_map_lookup_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) case BPF_MAP_TYPE_ARRAY_OF_MAPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) case BPF_MAP_TYPE_HASH_OF_MAPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) if (func_id != BPF_FUNC_map_lookup_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) case BPF_MAP_TYPE_SOCKMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) if (func_id != BPF_FUNC_sk_redirect_map &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) func_id != BPF_FUNC_sock_map_update &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) func_id != BPF_FUNC_map_delete_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) func_id != BPF_FUNC_msg_redirect_map &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) func_id != BPF_FUNC_sk_select_reuseport &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) func_id != BPF_FUNC_map_lookup_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) !may_update_sockmap(env, func_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) case BPF_MAP_TYPE_SOCKHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) if (func_id != BPF_FUNC_sk_redirect_hash &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) func_id != BPF_FUNC_sock_hash_update &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) func_id != BPF_FUNC_map_delete_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) func_id != BPF_FUNC_msg_redirect_hash &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) func_id != BPF_FUNC_sk_select_reuseport &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) func_id != BPF_FUNC_map_lookup_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) !may_update_sockmap(env, func_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) if (func_id != BPF_FUNC_sk_select_reuseport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) case BPF_MAP_TYPE_QUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) case BPF_MAP_TYPE_STACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) if (func_id != BPF_FUNC_map_peek_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) func_id != BPF_FUNC_map_pop_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) func_id != BPF_FUNC_map_push_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) case BPF_MAP_TYPE_SK_STORAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) if (func_id != BPF_FUNC_sk_storage_get &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) func_id != BPF_FUNC_sk_storage_delete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) case BPF_MAP_TYPE_INODE_STORAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) if (func_id != BPF_FUNC_inode_storage_get &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) func_id != BPF_FUNC_inode_storage_delete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) /* ... and second from the function itself. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) switch (func_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) case BPF_FUNC_tail_call:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) case BPF_FUNC_perf_event_read:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) case BPF_FUNC_perf_event_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) case BPF_FUNC_perf_event_read_value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) case BPF_FUNC_skb_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) case BPF_FUNC_xdp_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) case BPF_FUNC_ringbuf_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) case BPF_FUNC_ringbuf_reserve:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) case BPF_FUNC_ringbuf_query:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) if (map->map_type != BPF_MAP_TYPE_RINGBUF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) case BPF_FUNC_get_stackid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) case BPF_FUNC_current_task_under_cgroup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) case BPF_FUNC_skb_under_cgroup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) case BPF_FUNC_redirect_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) map->map_type != BPF_MAP_TYPE_CPUMAP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) map->map_type != BPF_MAP_TYPE_XSKMAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) case BPF_FUNC_sk_redirect_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) case BPF_FUNC_msg_redirect_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) case BPF_FUNC_sock_map_update:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) case BPF_FUNC_sk_redirect_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) case BPF_FUNC_msg_redirect_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) case BPF_FUNC_sock_hash_update:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) case BPF_FUNC_get_local_storage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) case BPF_FUNC_sk_select_reuseport:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) map->map_type != BPF_MAP_TYPE_SOCKMAP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) map->map_type != BPF_MAP_TYPE_SOCKHASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) case BPF_FUNC_map_peek_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) case BPF_FUNC_map_pop_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) case BPF_FUNC_map_push_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) if (map->map_type != BPF_MAP_TYPE_QUEUE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) map->map_type != BPF_MAP_TYPE_STACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) case BPF_FUNC_sk_storage_get:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) case BPF_FUNC_sk_storage_delete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) case BPF_FUNC_inode_storage_get:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) case BPF_FUNC_inode_storage_delete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) verbose(env, "cannot pass map_type %d into func %s#%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) map->map_type, func_id_name(func_id), func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) }
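/* Illustrative sketch (not part of the verifier): the two-way check
 * above rejects, for example, tail calls through anything other than
 * a prog array. Assuming a hypothetical hash map "my_hash",
 *
 *	bpf_tail_call(ctx, &my_hash, 0);
 *
 * fails to load with a "cannot pass map_type ... into func
 * bpf_tail_call..." error, while the same call through a
 * BPF_MAP_TYPE_PROG_ARRAY is accepted.
 */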
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) /* We only support one arg being in raw mode at the moment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) * which is sufficient for the helper functions we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) * right now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) return count <= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) }
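/* Illustrative sketch (not part of the verifier): a proto that passes
 * this check has at most one uninit-mem argument, along the lines of
 *
 *	static const struct bpf_func_proto probe_read_like_proto = {
 *		.arg1_type = ARG_PTR_TO_UNINIT_MEM,     // filled by the helper
 *		.arg2_type = ARG_CONST_SIZE_OR_ZERO,
 *		.arg3_type = ARG_ANYTHING,
 *	};
 *
 * The proto name is made up for the example; declaring two or more
 * ARG_PTR_TO_UNINIT_MEM arguments would make check_raw_mode_ok()
 * return false.
 */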
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) enum bpf_arg_type arg_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) return (arg_type_is_mem_ptr(arg_curr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) !arg_type_is_mem_size(arg_next)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) (!arg_type_is_mem_ptr(arg_curr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) arg_type_is_mem_size(arg_next));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) /* bpf_xxx(..., buf, len) call will access 'len'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) * bytes from memory 'buf'. Both arg types need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) * to be paired, so make sure there's no buggy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) * helper function specification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) if (arg_type_is_mem_size(fn->arg1_type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) arg_type_is_mem_ptr(fn->arg5_type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) }
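/* Illustrative sketch (not part of the verifier): the pairing rule
 * above means a buffer argument must be immediately followed by its
 * size argument in the proto, matching calls of the form
 *
 *	bpf_probe_read_kernel(buf, sizeof(buf), src);
 *
 * where arg1 is the memory pointer and arg2 its length. A proto with
 * a mem pointer in arg5, or with a size argument that does not follow
 * a mem pointer, is treated as a buggy helper specification.
 */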
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) if (arg_type_may_be_refcounted(fn->arg1_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) if (arg_type_may_be_refcounted(fn->arg2_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) if (arg_type_may_be_refcounted(fn->arg3_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) if (arg_type_may_be_refcounted(fn->arg4_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) if (arg_type_may_be_refcounted(fn->arg5_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) /* A reference acquiring function cannot acquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) * another refcounted ptr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) if (may_be_acquire_function(func_id) && count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) /* We only support one arg being unreferenced at the moment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) * which is sufficient for the helper functions we have right now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) return count <= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) static bool check_btf_id_ok(const struct bpf_func_proto *fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) return check_raw_mode_ok(fn) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) check_arg_pair_ok(fn) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) check_btf_id_ok(fn) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) * are now invalid, so turn them into unknown SCALAR_VALUE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) struct bpf_func_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) struct bpf_reg_state *regs = state->regs, *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) for (i = 0; i < MAX_BPF_REG; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) if (reg_is_pkt_pointer_any(&regs[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) mark_reg_unknown(env, regs, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) bpf_for_each_spilled_reg(i, state, reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) if (reg_is_pkt_pointer_any(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) __mark_reg_unknown(env, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) for (i = 0; i <= vstate->curframe; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) __clear_all_pkt_pointers(env, vstate->frame[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) }
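/* Illustrative sketch (not part of the verifier): because packet
 * pointers are invalidated here, a TC-style program must re-derive and
 * re-check data/data_end after any helper that may change packet data,
 * e.g. roughly:
 *
 *	bpf_skb_pull_data(skb, 64);
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	if (data + 64 > data_end)
 *		return TC_ACT_SHOT;
 *
 * Reusing a packet pointer derived before the helper call is rejected,
 * since it was just marked as an unknown SCALAR_VALUE.
 */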
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) static void release_reg_references(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) struct bpf_func_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) int ref_obj_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) struct bpf_reg_state *regs = state->regs, *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) for (i = 0; i < MAX_BPF_REG; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) if (regs[i].ref_obj_id == ref_obj_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) mark_reg_unknown(env, regs, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) bpf_for_each_spilled_reg(i, state, reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) if (reg->ref_obj_id == ref_obj_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) __mark_reg_unknown(env, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) /* The pointer with the specified id has released its reference to kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) * resources. Identify all copies of the same pointer and clear the reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) static int release_reference(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) int ref_obj_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) err = release_reference_state(cur_func(env), ref_obj_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) for (i = 0; i <= vstate->curframe; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) release_reg_references(env, vstate->frame[i], ref_obj_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) }
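/* Illustrative sketch (not part of the verifier): the acquire/release
 * pattern this tracks looks roughly like this on the program side:
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (sk)
 *		bpf_sk_release(sk);
 *
 * After bpf_sk_release(), every register still holding a copy of that
 * pointer loses the reference and is marked unknown, so any further
 * dereference of sk is rejected.
 */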
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) static void clear_caller_saved_regs(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) struct bpf_reg_state *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) /* after the call registers r0 - r5 were scratched */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) for (i = 0; i < CALLER_SAVED_REGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) mark_reg_not_init(env, regs, caller_saved[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) int *insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) struct bpf_verifier_state *state = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) struct bpf_func_info_aux *func_info_aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) struct bpf_func_state *caller, *callee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) int i, err, subprog, target_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) bool is_global = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) if (state->curframe + 1 >= MAX_CALL_FRAMES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) verbose(env, "the call stack of %d frames is too deep\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) state->curframe + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) target_insn = *insn_idx + insn->imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) subprog = find_subprog(env, target_insn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) if (subprog < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) verbose(env, "verifier bug. No program starts at insn %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) target_insn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) caller = state->frame[state->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) if (state->frame[state->curframe + 1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) verbose(env, "verifier bug. Frame %d already allocated\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) state->curframe + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) func_info_aux = env->prog->aux->func_info_aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) if (func_info_aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) err = btf_check_func_arg_match(env, subprog, caller->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) if (err == -EFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) if (is_global) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) verbose(env, "Caller passes invalid args into func#%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) subprog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) if (env->log.level & BPF_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) "Func#%d is global and valid. Skipping.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) subprog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) clear_caller_saved_regs(env, caller->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) /* All global functions return a 64-bit SCALAR_VALUE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) mark_reg_unknown(env, caller->regs, BPF_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) /* continue with next insn after call */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) callee = kzalloc(sizeof(*callee), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) if (!callee)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) state->frame[state->curframe + 1] = callee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) /* callee cannot access r0, r6 - r9 for reading and has to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) * into its own stack before reading from it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) * callee can read/write into caller's stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) init_func_state(env, callee,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) /* remember the callsite, it will be used by bpf_exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) *insn_idx /* callsite */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) state->curframe + 1 /* frameno within this callchain */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) subprog /* subprog number within this prog */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) /* Transfer references to the callee */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) err = transfer_reference_state(callee, caller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) /* copy r1 - r5 args that callee can access. The copy includes parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) * pointers, which connects us up to the liveness chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) for (i = BPF_REG_1; i <= BPF_REG_5; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) callee->regs[i] = caller->regs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) clear_caller_saved_regs(env, caller->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) /* only increment it after check_reg_arg() finished */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) state->curframe++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) /* and go analyze first insn of the callee */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) *insn_idx = target_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) if (env->log.level & BPF_LOG_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) verbose(env, "caller:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) print_verifier_state(env, caller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) verbose(env, "callee:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) print_verifier_state(env, callee);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) }
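/* Illustrative sketch (not part of the verifier): the global-function
 * fast path above roughly corresponds to program code such as
 *
 *	__noinline int scale(int x)        // global (non-static) subprog
 *	{
 *		return x * 3;
 *	}
 *	...
 *	int y = scale(v);
 *
 * A global subprog is verified once against its BTF signature and
 * callers only need to pass matching argument types, whereas a static
 * subprog is re-verified for every call site with the caller's exact
 * register state, using the callee frame set up above.
 */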
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) struct bpf_verifier_state *state = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) struct bpf_func_state *caller, *callee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) struct bpf_reg_state *r0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) callee = state->frame[state->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) r0 = &callee->regs[BPF_REG_0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) if (r0->type == PTR_TO_STACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) /* Technically it's ok to return the caller's stack pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) * (or the caller's caller's pointer) back to the caller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) * since these pointers are still valid. Only the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) * stack pointer becomes invalid as soon as the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) * exits, but let's be conservative.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) verbose(env, "cannot return stack pointer to the caller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) state->curframe--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) caller = state->frame[state->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) /* return to the caller whatever r0 had in the callee */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) caller->regs[BPF_REG_0] = *r0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) /* Transfer references to the caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) err = transfer_reference_state(caller, callee);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) *insn_idx = callee->callsite + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) if (env->log.level & BPF_LOG_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) verbose(env, "returning from callee:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) print_verifier_state(env, callee);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) verbose(env, "to caller at %d:\n", *insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) print_verifier_state(env, caller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) /* clear everything in the callee */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) free_func_state(callee);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) state->frame[state->curframe + 1] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) }
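/* Illustrative sketch (not part of the verifier): the PTR_TO_STACK
 * check above rejects a subprogram that returns a pointer into its
 * own (or the caller's) stack, e.g.:
 *
 *	static __noinline int *bad_ptr(void)
 *	{
 *		int x = 0;
 *
 *		return &x;   // "cannot return stack pointer to the caller"
 *	}
 */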
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) int func_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) struct bpf_call_arg_meta *meta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) if (ret_type != RET_INTEGER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) (func_id != BPF_FUNC_get_stack &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) func_id != BPF_FUNC_probe_read_str &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) func_id != BPF_FUNC_probe_read_kernel_str &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) func_id != BPF_FUNC_probe_read_user_str))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) ret_reg->smax_value = meta->msize_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) ret_reg->s32_max_value = meta->msize_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) ret_reg->smin_value = -MAX_ERRNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) ret_reg->s32_min_value = -MAX_ERRNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) __reg_deduce_bounds(ret_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) __reg_bound_offset(ret_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) __update_reg_bounds(ret_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) }
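/* Illustrative sketch (not part of the verifier): the refinement above
 * lets a pattern like
 *
 *	char buf[64];
 *	long n = bpf_probe_read_kernel_str(buf, sizeof(buf), src);
 *
 *	if (n > 0)
 *		// n is known to be <= 64 here, so bounded uses of n verify
 *		do_something(buf, n);
 *
 * pass, because r0 is bounded above by the size argument (64) and
 * below by -MAX_ERRNO instead of being a completely unknown scalar.
 * do_something() stands in for any later access bounded by n.
 */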
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) int func_id, int insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) struct bpf_map *map = meta->map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) if (func_id != BPF_FUNC_tail_call &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) func_id != BPF_FUNC_map_lookup_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) func_id != BPF_FUNC_map_update_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) func_id != BPF_FUNC_map_delete_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) func_id != BPF_FUNC_map_push_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) func_id != BPF_FUNC_map_pop_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) func_id != BPF_FUNC_map_peek_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) if (map == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) verbose(env, "kernel subsystem misconfigured verifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) /* In the case of a read-only map, some additional restrictions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) * need to be applied in order to prevent altering the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) * state of the map from the program side.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) if ((map->map_flags & BPF_F_RDONLY_PROG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) (func_id == BPF_FUNC_map_delete_elem ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) func_id == BPF_FUNC_map_update_elem ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) func_id == BPF_FUNC_map_push_elem ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) func_id == BPF_FUNC_map_pop_elem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) verbose(env, "write into map forbidden\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) if (!BPF_MAP_PTR(aux->map_ptr_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) bpf_map_ptr_store(aux, meta->map_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) !meta->map_ptr->bypass_spec_v1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) !meta->map_ptr->bypass_spec_v1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) }
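/* Illustrative sketch (not part of the verifier): for a map created
 * with BPF_F_RDONLY_PROG, lookups still work, but
 *
 *	bpf_map_update_elem(&ro_map, &key, &val, BPF_ANY);
 *
 * is rejected here with "write into map forbidden". "ro_map" is a
 * hypothetical map that is read-only from the program side.
 */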
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) int func_id, int insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) struct bpf_reg_state *regs = cur_regs(env), *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) struct bpf_map *map = meta->map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) struct tnum range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) if (func_id != BPF_FUNC_tail_call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) verbose(env, "kernel subsystem misconfigured verifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) range = tnum_range(0, map->max_entries - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) reg = &regs[BPF_REG_3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) err = mark_chain_precision(env, BPF_REG_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) val = reg->var_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) if (bpf_map_key_unseen(aux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) bpf_map_key_store(aux, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) else if (!bpf_map_key_poisoned(aux) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) bpf_map_key_immediate(aux) != val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) }
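/* Illustrative sketch (not part of the verifier): the key tracking
 * above distinguishes
 *
 *	bpf_tail_call(ctx, &jmp_table, 2);   // constant, in-range index
 *
 * from a tail call whose index register is not a known constant (or
 * differs between paths). The former records an immediate key that can
 * later be used to emit a direct jump; the latter poisons the key and
 * keeps the indirect tail call. "jmp_table" is a hypothetical prog
 * array used only for this example.
 */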
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) static int check_reference_leak(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) struct bpf_func_state *state = cur_func(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) for (i = 0; i < state->acquired_refs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) state->refs[i].id, state->refs[i].insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) return state->acquired_refs ? -EINVAL : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) }
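/* Illustrative sketch (not part of the verifier): a program that does
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	return 0;                            // sk is never released
 *
 * fails to load with "Unreleased reference id=... alloc_insn=...",
 * because the acquired reference is still live when the program exits.
 */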
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) const struct bpf_func_proto *fn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) struct bpf_reg_state *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) struct bpf_call_arg_meta meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) bool changes_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) /* find function prototype */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) if (env->ops->get_func_proto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) fn = env->ops->get_func_proto(func_id, env->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) if (!fn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) /* eBPF programs must be GPL compatible to use GPL-ed functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) if (!env->prog->gpl_compatible && fn->gpl_only) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) if (fn->allowed && !fn->allowed(env->prog)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) verbose(env, "helper call is not allowed in probe\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) /* With LD_ABS/IND some JITs save/restore skb from r1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) changes_data = bpf_helper_changes_pkt_data(fn->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) func_id_name(func_id), func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) memset(&meta, 0, sizeof(meta));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) meta.pkt_access = fn->pkt_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) err = check_func_proto(fn, func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) verbose(env, "kernel subsystem misconfigured func %s#%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) func_id_name(func_id), func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) meta.func_id = func_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) /* check args */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) for (i = 0; i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) err = check_func_arg(env, i, &meta, fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) err = record_func_map(env, &meta, func_id, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) err = record_func_key(env, &meta, func_id, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) * is inferred from register state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) for (i = 0; i < meta.access_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) BPF_WRITE, -1, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) if (func_id == BPF_FUNC_tail_call) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) err = check_reference_leak(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) verbose(env, "tail_call would lead to reference leak\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) } else if (is_release_function(func_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) err = release_reference(env, meta.ref_obj_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) verbose(env, "func %s#%d reference has not been acquired before\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) func_id_name(func_id), func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) 	/* check that the flags argument in get_local_storage(map, flags) is 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) * this is required because get_local_storage() can't return an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) if (func_id == BPF_FUNC_get_local_storage &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) 	    !register_is_null(&regs[BPF_REG_2])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) verbose(env, "get_local_storage() doesn't support non-zero flags\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) /* reset caller saved regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) for (i = 0; i < CALLER_SAVED_REGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) mark_reg_not_init(env, regs, caller_saved[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) /* helper call returns 64-bit value. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) /* update return register (already marked as written above) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) if (fn->ret_type == RET_INTEGER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) /* sets type to SCALAR_VALUE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) mark_reg_unknown(env, regs, BPF_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) } else if (fn->ret_type == RET_VOID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) regs[BPF_REG_0].type = NOT_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) fn->ret_type == RET_PTR_TO_MAP_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) /* There is no offset yet applied, variable or fixed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) mark_reg_known_zero(env, regs, BPF_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) /* remember map_ptr, so that check_map_access()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) * can check 'value_size' boundary of memory access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) * to map element returned from bpf_map_lookup_elem()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) if (meta.map_ptr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) "kernel subsystem misconfigured verifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) regs[BPF_REG_0].map_ptr = meta.map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) if (map_value_has_spin_lock(meta.map_ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) regs[BPF_REG_0].id = ++env->id_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) mark_reg_known_zero(env, regs, BPF_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) mark_reg_known_zero(env, regs, BPF_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) mark_reg_known_zero(env, regs, BPF_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) mark_reg_known_zero(env, regs, BPF_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) regs[BPF_REG_0].mem_size = meta.mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) } else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) mark_reg_known_zero(env, regs, BPF_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) t = btf_type_skip_modifiers(btf_vmlinux, meta.ret_btf_id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) if (!btf_type_is_struct(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) u32 tsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) const struct btf_type *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) const char *tname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) /* resolve the type size of ksym. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) ret = btf_resolve_size(btf_vmlinux, t, &tsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) if (IS_ERR(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) tname = btf_name_by_offset(btf_vmlinux, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) verbose(env, "unable to resolve the size of type '%s': %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) tname, PTR_ERR(ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) regs[BPF_REG_0].type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) PTR_TO_MEM : PTR_TO_MEM_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) regs[BPF_REG_0].mem_size = tsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) regs[BPF_REG_0].type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) regs[BPF_REG_0].btf_id = meta.ret_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) int ret_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) mark_reg_known_zero(env, regs, BPF_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) ret_btf_id = *fn->ret_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) if (ret_btf_id == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) verbose(env, "invalid return type %d of func %s#%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) fn->ret_type, func_id_name(func_id), func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) regs[BPF_REG_0].btf_id = ret_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) verbose(env, "unknown return type %d of func %s#%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) fn->ret_type, func_id_name(func_id), func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) if (reg_type_may_be_null(regs[BPF_REG_0].type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) regs[BPF_REG_0].id = ++env->id_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) if (is_ptr_cast_function(func_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) /* For release_reference() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) } else if (is_acquire_function(func_id, meta.map_ptr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) int id = acquire_reference_state(env, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) if (id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) return id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) /* For mark_ptr_or_null_reg() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) regs[BPF_REG_0].id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) /* For release_reference() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) regs[BPF_REG_0].ref_obj_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) err = check_map_func_compatibility(env, meta.map_ptr, func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) if ((func_id == BPF_FUNC_get_stack ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) func_id == BPF_FUNC_get_task_stack) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) !env->prog->has_callchain_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) const char *err_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) #ifdef CONFIG_PERF_EVENTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) err = get_callchain_buffers(sysctl_perf_event_max_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) err_str = "cannot get callchain buffer for func %s#%d\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) err = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) verbose(env, err_str, func_id_name(func_id), func_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) env->prog->has_callchain_buf = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) env->prog->call_get_stack = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) if (changes_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) clear_all_pkt_pointers(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582)
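/* Signed overflow helpers for the bounds arithmetic below.  The operation is
 * performed in the unsigned domain (where wrap-around is well-defined) and
 * overflow is detected from the direction of the result, e.g.:
 *
 *   signed_add_overflows(S64_MAX, 1)  -> res == S64_MIN, res < a -> true
 *   signed_add_overflows(-1, S64_MIN) -> res == S64_MAX, res > a -> true
 *   signed_add_overflows(1, 2)        -> res == 3,       res < a -> false
 */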
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) static bool signed_add_overflows(s64 a, s64 b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) /* Do the add in u64, where overflow is well-defined */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) s64 res = (s64)((u64)a + (u64)b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) if (b < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) return res > a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) return res < a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) static bool signed_add32_overflows(s32 a, s32 b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) /* Do the add in u32, where overflow is well-defined */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) s32 res = (s32)((u32)a + (u32)b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) if (b < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) return res > a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) return res < a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) static bool signed_sub_overflows(s64 a, s64 b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) /* Do the sub in u64, where overflow is well-defined */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) s64 res = (s64)((u64)a - (u64)b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) if (b < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) return res < a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) return res > a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) static bool signed_sub32_overflows(s32 a, s32 b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) /* Do the sub in u32, where overflow is well-defined */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) s32 res = (s32)((u32)a - (u32)b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) if (b < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) return res < a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) return res > a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622)
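/* Reject pointer arithmetic whose known constant, fixed offset or signed
 * minimum bound lies outside +/-BPF_MAX_VAR_OFF (or is completely
 * unbounded).  Keeping offsets well inside the 64-bit range ensures the
 * min/max bounds arithmetic elsewhere cannot overflow.
 */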
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) static bool check_reg_sane_offset(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) const struct bpf_reg_state *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) enum bpf_reg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) bool known = tnum_is_const(reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) s64 val = reg->var_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) s64 smin = reg->smin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) verbose(env, "math between %s pointer and %lld is not allowed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) reg_type_str[type], val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) verbose(env, "%s pointer offset %d is not allowed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) reg_type_str[type], reg->off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) if (smin == S64_MIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) reg_type_str[type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) verbose(env, "value %lld makes %s pointer be out of bounds\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) smin, reg_type_str[type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) return &env->insn_aux_data[env->insn_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662)
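/* Error codes local to the Spectre v1 ALU sanitation code below.  They are
 * translated into verbose messages by sanitize_err(), which always returns
 * -EACCES to the caller.
 */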
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) REASON_BOUNDS = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) REASON_TYPE = -2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) REASON_PATHS = -3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) REASON_LIMIT = -4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) REASON_STACK = -5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670)
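/* Compute the limit used to mask a pointer ALU operation under Spectre v1:
 * an upper bound on how far the offset register may move the pointer in the
 * direction being masked without leaving the underlying object.  E.g. a
 * PTR_TO_STACK register at fp-16 (off == -16, var_off == 0) yields
 * ptr_limit == 16, and anything reaching MAX_BPF_STACK is rejected; for
 * PTR_TO_MAP_VALUE the object size is the map's value_size.
 */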
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) u32 *alu_limit, bool mask_to_left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) u32 max = 0, ptr_limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) switch (ptr_reg->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) case PTR_TO_STACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) /* Offset 0 is out-of-bounds, but acceptable start for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) * left direction, see BPF_REG_FP. Also, unknown scalar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) * offset where we would need to deal with min/max bounds is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) * currently prohibited for unprivileged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) max = MAX_BPF_STACK + mask_to_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) case PTR_TO_MAP_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) max = ptr_reg->map_ptr->value_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) ptr_limit = (mask_to_left ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) ptr_reg->smin_value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) ptr_reg->umax_value) + ptr_reg->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) return REASON_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) if (ptr_limit >= max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) return REASON_LIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) *alu_limit = ptr_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701)
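/* Masking can be skipped entirely when Spectre v1 mitigations are disabled
 * for this program, or when the offset is a BPF_K immediate: an immediate is
 * fixed in the instruction itself and cannot be attacker controlled.
 */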
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) const struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) u32 alu_state, u32 alu_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) /* If we arrived here from different branches with different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) * state or limits to sanitize, then this won't work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) if (aux->alu_state &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) (aux->alu_state != alu_state ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) aux->alu_limit != alu_limit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) return REASON_PATHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) /* Corresponding fixup done in fixup_bpf_calls(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) aux->alu_state = alu_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) aux->alu_limit = alu_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) static int sanitize_val_alu(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) struct bpf_insn_aux_data *aux = cur_aux(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) if (can_skip_alu_sanitation(env, insn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) static bool sanitize_needed(u8 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) return opcode == BPF_ADD || opcode == BPF_SUB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740)
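/* Scratch state carried between the two sanitize_ptr_alu() phases: @aux holds
 * the alu_state/alu_limit computed before the operation is simulated, and
 * @mask_to_left records the masking direction so the commit phase reuses the
 * same one.
 */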
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) struct bpf_sanitize_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) struct bpf_insn_aux_data aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) bool mask_to_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745)
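/* Push a speculative verification path starting at @next_idx.  The
 * destination (and for BPF_X also the source) register is marked as an
 * unknown scalar in that branch so the speculative path is explored with
 * worst-case contents.
 */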
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) static struct bpf_verifier_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) sanitize_speculative_path(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) const struct bpf_insn *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) u32 next_idx, u32 curr_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) struct bpf_verifier_state *branch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) struct bpf_reg_state *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) branch = push_stack(env, next_idx, curr_idx, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) if (branch && insn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) regs = branch->frame[branch->curframe]->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) if (BPF_SRC(insn->code) == BPF_K) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) mark_reg_unknown(env, regs, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) } else if (BPF_SRC(insn->code) == BPF_X) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) mark_reg_unknown(env, regs, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) mark_reg_unknown(env, regs, insn->src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) return branch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766)
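/* Collect (and later commit) the masking state for a pointer ADD/SUB under
 * Spectre v1.  The function runs in two phases per instruction: first with
 * @commit_window == false to record limits in @info and to push a speculative
 * path that simulates the truncated result, then with @commit_window == true
 * to narrow aux->alu_limit based on the pointer movement actually observed.
 */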
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) static int sanitize_ptr_alu(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) struct bpf_insn *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) const struct bpf_reg_state *ptr_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) const struct bpf_reg_state *off_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) struct bpf_sanitize_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) const bool commit_window)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) bool off_is_imm = tnum_is_const(off_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) bool off_is_neg = off_reg->smin_value < 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) bool ptr_is_dst_reg = ptr_reg == dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) u8 opcode = BPF_OP(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) u32 alu_state, alu_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) struct bpf_reg_state tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) if (can_skip_alu_sanitation(env, insn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) /* We already marked aux for masking from non-speculative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) * paths, thus we got here in the first place. We only care
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) * to explore bad access from here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) if (vstate->speculative)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) goto do_sim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) if (!commit_window) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) if (!tnum_is_const(off_reg->var_off) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) return REASON_BOUNDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) info->mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) (opcode == BPF_SUB && !off_is_neg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) if (commit_window) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) /* In commit phase we narrow the masking window based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) * the observed pointer move after the simulated operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) alu_state = info->aux.alu_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) alu_limit = abs(info->aux.alu_limit - alu_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) alu_state |= ptr_is_dst_reg ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) /* Limit pruning on unknown scalars to enable deep search for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) * potential masking differences from other program paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) if (!off_is_imm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) env->explore_alu_limits = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) err = update_alu_sanitation_state(aux, alu_state, alu_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) do_sim:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) /* If we're in commit phase, we're done here given we already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) * pushed the truncated dst_reg into the speculative verification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) * stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) 	 * Also, when the register is a known constant, we rewrite register-based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) * operation to immediate-based, and thus do not need masking (and as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) * a consequence, do not need to simulate the zero-truncation either).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) if (commit_window || off_is_imm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) /* Simulate and find potential out-of-bounds access under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) * speculative execution from truncation as a result of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) * masking when off was not within expected range. If off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) * sits in dst, then we temporarily need to move ptr there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) * to simulate dst (== 0) +/-= ptr. Needed, for example,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) * for cases where we use K-based arithmetic in one direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) * and truncated reg-based in the other in order to explore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) * bad access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) if (!ptr_is_dst_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) tmp = *dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) *dst_reg = *ptr_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) env->insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) if (!ptr_is_dst_reg && ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) *dst_reg = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) return !ret ? REASON_STACK : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) /* If we simulate paths under speculation, we don't update the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) * insn as 'seen' such that when we verify unreachable paths in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) * the non-speculative domain, sanitize_dead_code() can still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) * rewrite/sanitize them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) if (!vstate->speculative)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) static int sanitize_err(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) const struct bpf_insn *insn, int reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) const struct bpf_reg_state *off_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) const struct bpf_reg_state *dst_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) static const char *err = "pointer arithmetic with it prohibited for !root";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) u32 dst = insn->dst_reg, src = insn->src_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) switch (reason) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) case REASON_BOUNDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) off_reg == dst_reg ? dst : src, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) case REASON_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) off_reg == dst_reg ? src : dst, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) case REASON_PATHS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) dst, op, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) case REASON_LIMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) dst, op, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) case REASON_STACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) verbose(env, "R%d could not be pushed for speculative verification, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) dst, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) verbose(env, "verifier internal error: unknown reason (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) /* check that stack access falls within stack limits and that 'reg' doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) * have a variable offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) * Variable offset is prohibited for unprivileged mode for simplicity since it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) * requires corresponding support in Spectre masking for stack ALU. See also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) * retrieve_ptr_limit().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) * 'off' includes 'reg->off'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) static int check_stack_access_for_ptr_arithmetic(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) int regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) const struct bpf_reg_state *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) if (!tnum_is_const(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) regno, tn_buf, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) if (off >= 0 || off < -MAX_BPF_STACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) verbose(env, "R%d stack pointer arithmetic goes out of range, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) "prohibited for !root; off=%d\n", regno, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) static int sanitize_check_bounds(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) const struct bpf_insn *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) const struct bpf_reg_state *dst_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) u32 dst = insn->dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) 	/* For unprivileged programs we require the resulting offset to be in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) 	 * bounds so that the access can be sanitized later on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) if (env->bypass_spec_v1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) switch (dst_reg->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) case PTR_TO_STACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) dst_reg->off + dst_reg->var_off.value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) case PTR_TO_MAP_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) if (check_map_access(env, dst, dst_reg->off, 1, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) verbose(env, "R%d pointer arithmetic of map value goes out of range, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) "prohibited for !root\n", dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) * Caller should also handle BPF_MOV case separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) * If we return -EACCES, caller may want to try again treating pointer as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) struct bpf_insn *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) const struct bpf_reg_state *ptr_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) const struct bpf_reg_state *off_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) struct bpf_func_state *state = vstate->frame[vstate->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) struct bpf_reg_state *regs = state->regs, *dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) bool known = tnum_is_const(off_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) struct bpf_sanitize_info info = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) u8 opcode = BPF_OP(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) u32 dst = insn->dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) 	dst_reg = &regs[dst];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) smin_val > smax_val || umin_val > umax_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) /* Taint dst register if offset had invalid bounds derived from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) * e.g. dead branches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) __mark_reg_unknown(env, dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) if (BPF_CLASS(insn->code) != BPF_ALU64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) /* 32-bit ALU ops on pointers produce (meaningless) scalars */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) if (opcode == BPF_SUB && env->allow_ptr_leaks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) __mark_reg_unknown(env, dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) "R%d 32-bit pointer arithmetic prohibited\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) switch (ptr_reg->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) case PTR_TO_MAP_VALUE_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) dst, reg_type_str[ptr_reg->type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) case CONST_PTR_TO_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) /* smin_val represents the known value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) if (known && smin_val == 0 && opcode == BPF_ADD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) case PTR_TO_PACKET_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) case PTR_TO_SOCKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) case PTR_TO_SOCK_COMMON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) case PTR_TO_TCP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) case PTR_TO_XDP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) reject:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) verbose(env, "R%d pointer arithmetic on %s prohibited\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) dst, reg_type_str[ptr_reg->type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) if (reg_type_may_be_null(ptr_reg->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) * The id may be overwritten later if we create a new variable offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) dst_reg->type = ptr_reg->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) dst_reg->id = ptr_reg->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) /* pointer types do not carry 32-bit bounds at the moment. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) __mark_reg32_unbounded(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) if (sanitize_needed(opcode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) &info, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) return sanitize_err(env, insn, ret, off_reg, dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072)
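	/* Two cases below for ADD and SUB: a verifier-known constant that
	 * still fits the s32 'off' field is folded into the fixed offset
	 * with all other bounds copied unchanged; otherwise a variable
	 * offset is created, the scalar's bounds are combined with the
	 * pointer's bounds (saturating to unknown on overflow) and var_off
	 * is recomputed from the two tnums.
	 */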
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) case BPF_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) /* We can take a fixed offset as long as it doesn't overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) * the s32 'off' field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) if (known && (ptr_reg->off + smin_val ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) (s64)(s32)(ptr_reg->off + smin_val))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) /* pointer += K. Accumulate it into fixed offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) dst_reg->smin_value = smin_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) dst_reg->smax_value = smax_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) dst_reg->umin_value = umin_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) dst_reg->umax_value = umax_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) dst_reg->var_off = ptr_reg->var_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) dst_reg->off = ptr_reg->off + smin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) dst_reg->raw = ptr_reg->raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) /* A new variable offset is created. Note that off_reg->off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) * == 0, since it's a scalar.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) * dst_reg gets the pointer type and since some positive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) * integer value was added to the pointer, give it a new 'id'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) * if it's a PTR_TO_PACKET.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) 		 * This creates a new 'base' pointer, off_reg (variable) gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) * added into the variable offset, and we copy the fixed offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) * from ptr_reg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) if (signed_add_overflows(smin_ptr, smin_val) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) signed_add_overflows(smax_ptr, smax_val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) dst_reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) dst_reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) dst_reg->smin_value = smin_ptr + smin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) dst_reg->smax_value = smax_ptr + smax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) if (umin_ptr + umin_val < umin_ptr ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) umax_ptr + umax_val < umax_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) dst_reg->umin_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) dst_reg->umax_value = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) dst_reg->umin_value = umin_ptr + umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) dst_reg->umax_value = umax_ptr + umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) dst_reg->off = ptr_reg->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) dst_reg->raw = ptr_reg->raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) if (reg_is_pkt_pointer(ptr_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) dst_reg->id = ++env->id_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) /* something was added to pkt_ptr, set range to zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) dst_reg->raw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) case BPF_SUB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) if (dst_reg == off_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) /* scalar -= pointer. Creates an unknown scalar */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) verbose(env, "R%d tried to subtract pointer from scalar\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) /* We don't allow subtraction from FP because, according to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) * test_verifier.c test "invalid fp arithmetic", JITs might not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) * be able to deal with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) if (ptr_reg->type == PTR_TO_STACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) verbose(env, "R%d subtraction from stack pointer prohibited\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) if (known && (ptr_reg->off - smin_val ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) (s64)(s32)(ptr_reg->off - smin_val))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) /* pointer -= K. Subtract it from fixed offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) dst_reg->smin_value = smin_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) dst_reg->smax_value = smax_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) dst_reg->umin_value = umin_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) dst_reg->umax_value = umax_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) dst_reg->var_off = ptr_reg->var_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) dst_reg->id = ptr_reg->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) dst_reg->off = ptr_reg->off - smin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) dst_reg->raw = ptr_reg->raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) /* A new variable offset is created. If the subtrahend is known
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) * nonnegative, then any reg->range we had before is still good.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) */
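/* For example (hypothetical values): subtracting a scalar known to lie in
 * [0, 4] can only move a packet pointer backwards, so any packet range
 * verified earlier remains valid; that is why 'raw' is only cleared below
 * when smin_val < 0.
 */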
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) if (signed_sub_overflows(smin_ptr, smax_val) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) signed_sub_overflows(smax_ptr, smin_val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) /* Overflow possible, we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) dst_reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) dst_reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) dst_reg->smin_value = smin_ptr - smax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) dst_reg->smax_value = smax_ptr - smin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) if (umin_ptr < umax_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) /* Overflow possible, we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) dst_reg->umin_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) dst_reg->umax_value = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) /* Cannot overflow (as long as bounds are consistent) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) dst_reg->umin_value = umin_ptr - umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) dst_reg->umax_value = umax_ptr - umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) dst_reg->off = ptr_reg->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) dst_reg->raw = ptr_reg->raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177) if (reg_is_pkt_pointer(ptr_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) dst_reg->id = ++env->id_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) /* the pointer may have moved forward (negative subtrahend), so the old packet range is no longer valid; clear it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) if (smin_val < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) dst_reg->raw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) case BPF_AND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) case BPF_OR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) case BPF_XOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) /* bitwise ops on pointers are troublesome, prohibit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) dst, bpf_alu_string[opcode >> 4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) /* other operators (e.g. MUL,LSH) produce non-pointer results */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) dst, bpf_alu_string[opcode >> 4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) __reg_deduce_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) __reg_bound_offset(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) if (sanitize_check_bounds(env, insn, dst_reg) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) if (sanitize_needed(opcode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) &info, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) return sanitize_err(env, insn, ret, off_reg, dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) s32 smin_val = src_reg->s32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) s32 smax_val = src_reg->s32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) u32 umin_val = src_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) u32 umax_val = src_reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) dst_reg->s32_min_value = S32_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) dst_reg->s32_max_value = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) dst_reg->s32_min_value += smin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) dst_reg->s32_max_value += smax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) }
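/* The additions below are done in 32 bits, so wraparound shows up as the
 * sum being smaller than the addend. E.g. 0xffffff00 + 0x200 wraps to
 * 0x100, which is < 0x200, so the unsigned bounds are reset.
 */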
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) if (dst_reg->u32_min_value + umin_val < umin_val ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) dst_reg->u32_max_value + umax_val < umax_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) dst_reg->u32_min_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) dst_reg->u32_max_value = U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) dst_reg->u32_min_value += umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) dst_reg->u32_max_value += umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) s64 smin_val = src_reg->smin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) s64 smax_val = src_reg->smax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) u64 umin_val = src_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) u64 umax_val = src_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) signed_add_overflows(dst_reg->smax_value, smax_val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) dst_reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) dst_reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) dst_reg->smin_value += smin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) dst_reg->smax_value += smax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259) if (dst_reg->umin_value + umin_val < umin_val ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) dst_reg->umax_value + umax_val < umax_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) dst_reg->umin_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) dst_reg->umax_value = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) dst_reg->umin_value += umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) dst_reg->umax_value += umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) s32 smin_val = src_reg->s32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) s32 smax_val = src_reg->s32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) u32 umin_val = src_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) u32 umax_val = src_reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) /* Overflow possible, we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280) dst_reg->s32_min_value = S32_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) dst_reg->s32_max_value = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) dst_reg->s32_min_value -= smax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) dst_reg->s32_max_value -= smin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) if (dst_reg->u32_min_value < umax_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) /* Overflow possible, we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) dst_reg->u32_min_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) dst_reg->u32_max_value = U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) /* Cannot overflow (as long as bounds are consistent) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) dst_reg->u32_min_value -= umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) dst_reg->u32_max_value -= umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300) s64 smin_val = src_reg->smin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) s64 smax_val = src_reg->smax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) u64 umin_val = src_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) u64 umax_val = src_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) signed_sub_overflows(dst_reg->smax_value, smin_val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) /* Overflow possible, we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) dst_reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) dst_reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) dst_reg->smin_value -= smax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) dst_reg->smax_value -= smin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) if (dst_reg->umin_value < umax_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) /* Overflow possible, we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) dst_reg->umin_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) dst_reg->umax_value = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) /* Cannot overflow (as long as bounds are consistent) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) dst_reg->umin_value -= umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) dst_reg->umax_value -= umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) s32 smin_val = src_reg->s32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) u32 umin_val = src_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) u32 umax_val = src_reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) if (smin_val < 0 || dst_reg->s32_min_value < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) /* Ain't nobody got time to multiply that sign */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) __mark_reg32_unbounded(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) /* Both values are positive, so we can work with unsigned and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) * copy the result to signed (unless it exceeds S32_MAX).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) */
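/* Capping both operands at U16_MAX guarantees the product fits in u32:
 * 0xffff * 0xffff == 0xfffe0001 < U32_MAX, so the multiplications below
 * cannot wrap.
 */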
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) /* Potential overflow, we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) __mark_reg32_unbounded(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) dst_reg->u32_min_value *= umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) dst_reg->u32_max_value *= umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) if (dst_reg->u32_max_value > S32_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) /* Overflow possible, we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) dst_reg->s32_min_value = S32_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) dst_reg->s32_max_value = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) dst_reg->s32_min_value = dst_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) dst_reg->s32_max_value = dst_reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) s64 smin_val = src_reg->smin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) u64 umin_val = src_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) u64 umax_val = src_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) if (smin_val < 0 || dst_reg->smin_value < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) /* Ain't nobody got time to multiply that sign */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) __mark_reg64_unbounded(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) /* Both values are positive, so we can work with unsigned and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) * copy the result to signed (unless it exceeds S64_MAX).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) */
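/* Capping both operands at U32_MAX guarantees the product fits in u64:
 * 0xffffffff * 0xffffffff == 0xfffffffe00000001 < U64_MAX, so the
 * multiplications below cannot wrap.
 */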
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372) if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) /* Potential overflow, we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) __mark_reg64_unbounded(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) dst_reg->umin_value *= umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) dst_reg->umax_value *= umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) if (dst_reg->umax_value > S64_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) /* Overflow possible, we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) dst_reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) dst_reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) dst_reg->smin_value = dst_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) dst_reg->smax_value = dst_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) bool src_known = tnum_subreg_is_const(src_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) struct tnum var32_off = tnum_subreg(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) s32 smin_val = src_reg->s32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) u32 umax_val = src_reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) if (src_known && dst_known) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) __mark_reg32_known(dst_reg, var32_off.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) /* We get our minimum from the var_off, since that's inherently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) * bitwise. Our maximum is the minimum of the operands' maxima.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) */
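/* For example (hypothetical value): if the result tnum has value == 0x4
 * (bit 2 known set), the result is at least 4; and since AND can only
 * clear bits, it can never exceed either operand's maximum.
 */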
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) dst_reg->u32_min_value = var32_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) if (dst_reg->s32_min_value < 0 || smin_val < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) /* Lose signed bounds when ANDing negative numbers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) * ain't nobody got time for that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) dst_reg->s32_min_value = S32_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) dst_reg->s32_max_value = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) /* ANDing two nonnegative values gives a nonnegative result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) * so it is safe to cast the u32 result into s32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) dst_reg->s32_min_value = dst_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) dst_reg->s32_max_value = dst_reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) bool src_known = tnum_is_const(src_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) bool dst_known = tnum_is_const(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) s64 smin_val = src_reg->smin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) u64 umax_val = src_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) if (src_known && dst_known) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) __mark_reg_known(dst_reg, dst_reg->var_off.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) /* We get our minimum from the var_off, since that's inherently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) * bitwise. Our maximum is the minimum of the operands' maxima.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) dst_reg->umin_value = dst_reg->var_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) if (dst_reg->smin_value < 0 || smin_val < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) /* Lose signed bounds when ANDing negative numbers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) * ain't nobody got time for that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) dst_reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) dst_reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) /* ANDing two nonnegative values gives a nonnegative result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) * so it is safe to cast the u64 result into s64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) dst_reg->smin_value = dst_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) dst_reg->smax_value = dst_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) /* We may learn something more from the var_off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) bool src_known = tnum_subreg_is_const(src_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) struct tnum var32_off = tnum_subreg(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) s32 smin_val = src_reg->s32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) u32 umin_val = src_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) if (src_known && dst_known) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) __mark_reg32_known(dst_reg, var32_off.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) /* We get our maximum from the var_off, and our minimum is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) * maximum of the operands' minima
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) dst_reg->u32_max_value = var32_off.value | var32_off.mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) if (dst_reg->s32_min_value < 0 || smin_val < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) /* Lose signed bounds when ORing negative numbers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) * ain't nobody got time for that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) dst_reg->s32_min_value = S32_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) dst_reg->s32_max_value = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) /* ORing two nonnegative values gives a nonnegative result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) * so it is safe to cast the u32 result into s32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) dst_reg->s32_min_value = dst_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) dst_reg->s32_max_value = dst_reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) bool src_known = tnum_is_const(src_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) bool dst_known = tnum_is_const(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) s64 smin_val = src_reg->smin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) u64 umin_val = src_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) if (src_known && dst_known) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) __mark_reg_known(dst_reg, dst_reg->var_off.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) /* We get our maximum from the var_off, and our minimum is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) * maximum of the operands' minima
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) */
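/* For example, OR can only set bits, so the result is at least as large
 * as either operand (the minimum is the max of the minima), and it can
 * never exceed value | mask of the result tnum (every bit that could
 * possibly be 1).
 */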
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) if (dst_reg->smin_value < 0 || smin_val < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) /* Lose signed bounds when ORing negative numbers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) * ain't nobody got time for that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) dst_reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) dst_reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) /* ORing two nonnegative values gives a nonnegative result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) * so it is safe to cast the u64 result into s64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) dst_reg->smin_value = dst_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521) dst_reg->smax_value = dst_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) /* We may learn something more from the var_off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) bool src_known = tnum_subreg_is_const(src_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) struct tnum var32_off = tnum_subreg(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) s32 smin_val = src_reg->s32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) if (src_known && dst_known) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) __mark_reg32_known(dst_reg, var32_off.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540) /* We get both minimum and maximum from the var32_off. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) dst_reg->u32_min_value = var32_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) dst_reg->u32_max_value = var32_off.value | var32_off.mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) /* XORing two nonnegative numbers gives a nonnegative result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) * so it is safe to cast the u32 result into s32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) dst_reg->s32_min_value = dst_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) dst_reg->s32_max_value = dst_reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) dst_reg->s32_min_value = S32_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) dst_reg->s32_max_value = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) bool src_known = tnum_is_const(src_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) bool dst_known = tnum_is_const(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) s64 smin_val = src_reg->smin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) if (src_known && dst_known) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) /* dst_reg->var_off.value has been updated earlier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) __mark_reg_known(dst_reg, dst_reg->var_off.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) /* We get both minimum and maximum from the var_off. */
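/* For example (hypothetical values): a result tnum of (value == 0x8,
 * mask == 0x3) means the result is 0x8 plus some subset of the 0x3 bits,
 * so it lies in [0x8, 0xb]: umin = value, umax = value | mask.
 */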
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) dst_reg->umin_value = dst_reg->var_off.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) if (dst_reg->smin_value >= 0 && smin_val >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) /* XORing two nonnegative numbers gives a nonnegative result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) * so it is safe to cast the u64 result into s64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) dst_reg->smin_value = dst_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) dst_reg->smax_value = dst_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) dst_reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) dst_reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) u64 umin_val, u64 umax_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) /* We lose all sign bit information (except what we can pick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) * up from var_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) dst_reg->s32_min_value = S32_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) dst_reg->s32_max_value = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) /* If we might shift our top bit out, then we know nothing */
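/* E.g. with umax_val == 4 the check below requires u32_max_value <=
 * (1 << 27); anything larger might shift a set bit out past bit 31, in
 * which case we give up on the unsigned bounds.
 */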
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) dst_reg->u32_min_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) dst_reg->u32_max_value = U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) dst_reg->u32_min_value <<= umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) dst_reg->u32_max_value <<= umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) u32 umax_val = src_reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) u32 umin_val = src_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) /* u32 alu operation will zext upper bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) struct tnum subreg = tnum_subreg(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) /* Not strictly required, but to be careful mark the reg64 bounds as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) * unknown, so that we are forced to pick them up from the tnum and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617) * zero-extend later; if some path skips this step we are still safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619) __mark_reg64_unbounded(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620) __update_reg32_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) u64 umin_val, u64 umax_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) /* Special case <<32 because it is a common compiler pattern to sign
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) * positive we know this shift will also be positive so we can track
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) * bounds correctly. Otherwise we lose all sign bit information except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) * what we can pick up from var_off. Perhaps we can generalize this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) * later to shifts of any length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) */
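/* E.g. "r0 <<= 32; r0 s>>= 32" is how compilers typically sign-extend a
 * 32-bit subregister value held in r0.
 */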
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) dst_reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) dst_reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) /* If we might shift our top bit out, then we know nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) dst_reg->umin_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) dst_reg->umax_value = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) dst_reg->umin_value <<= umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) dst_reg->umax_value <<= umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) u64 umax_val = src_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) u64 umin_val = src_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) /* scalar64 calc uses 32bit unshifted bounds so must be called first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) __scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) /* We may learn something more from the var_off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) struct tnum subreg = tnum_subreg(dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) u32 umax_val = src_reg->u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) u32 umin_val = src_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) /* BPF_RSH is an unsigned shift. If the value in dst_reg might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) * be negative, then either:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677) * 1) src_reg might be zero, so the sign bit of the result is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) * unknown, so we lose our signed bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) * 2) it's known negative, thus the unsigned bounds capture the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) * signed bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) * 3) the signed bounds cross zero, so they tell us nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) * about the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683) * If the value in dst_reg is known nonnegative, then again the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) * unsigned bounds capture the signed bounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) * Thus, in all cases it suffices to blow away our signed bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) * and rely on inferring new ones from the unsigned bounds and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) * var_off of the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) dst_reg->s32_min_value = S32_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690) dst_reg->s32_max_value = S32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) dst_reg->var_off = tnum_rshift(subreg, umin_val);
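/* The largest possible shift yields the smallest result and vice versa,
 * hence the minimum is shifted by umax_val and the maximum by umin_val.
 */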
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) dst_reg->u32_min_value >>= umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) dst_reg->u32_max_value >>= umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) __mark_reg64_unbounded(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) __update_reg32_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) u64 umax_val = src_reg->umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) u64 umin_val = src_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) /* BPF_RSH is an unsigned shift. If the value in dst_reg might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) * be negative, then either:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) * 1) src_reg might be zero, so the sign bit of the result is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) * unknown, so we lose our signed bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) * 2) it's known negative, thus the unsigned bounds capture the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) * signed bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) * 3) the signed bounds cross zero, so they tell us nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) * about the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) * If the value in dst_reg is known nonnegative, then again the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) * unsigned bounds capture the signed bounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) * Thus, in all cases it suffices to blow away our signed bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) * and rely on inferring new ones from the unsigned bounds and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) * var_off of the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) dst_reg->smin_value = S64_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) dst_reg->smax_value = S64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) dst_reg->umin_value >>= umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) dst_reg->umax_value >>= umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) /* It's not easy to operate on alu32 bounds here because it depends
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) * on bits being shifted in. Take the easy way out and mark them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) * unbounded so we can recalculate them later from the tnum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) __mark_reg32_unbounded(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737) u64 umin_val = src_reg->u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) /* Upon reaching here, src_known is true and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) * umax_val is equal to umin_val.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) */
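/* Arithmetic right shift by a constant is monotonic, so shifting both
 * signed bounds by the same amount keeps s32_min_value <= s32_max_value.
 */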
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745) dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) /* blow away the dst_reg umin_value/umax_value and rely on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) * dst_reg var_off to refine the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) dst_reg->u32_min_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) dst_reg->u32_max_value = U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) __mark_reg64_unbounded(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) __update_reg32_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) struct bpf_reg_state *src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) u64 umin_val = src_reg->umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) /* Upon reaching here, src_known is true and umax_val is equal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) * to umin_val.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) dst_reg->smin_value >>= umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) dst_reg->smax_value >>= umin_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770) /* blow away the dst_reg umin_value/umax_value and rely on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) * dst_reg var_off to refine the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773) dst_reg->umin_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) dst_reg->umax_value = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) /* It's not easy to operate on alu32 bounds here because it depends
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) * on bits being shifted in from the upper 32 bits. Take the easy way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) * out and mark them unbounded so we can recalculate later from tnum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) __mark_reg32_unbounded(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784) /* WARNING: This function does calculations on 64-bit values, but the actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) * execution may occur on 32-bit values. Therefore, things like bitshifts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) * need extra checks in the 32-bit case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) struct bpf_insn *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) struct bpf_reg_state src_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) struct bpf_reg_state *regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) u8 opcode = BPF_OP(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) bool src_known;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) s64 smin_val, smax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) u64 umin_val, umax_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) s32 s32_min_val, s32_max_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799) u32 u32_min_val, u32_max_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) smin_val = src_reg.smin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) smax_val = src_reg.smax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806) umin_val = src_reg.umin_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) umax_val = src_reg.umax_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) s32_min_val = src_reg.s32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) s32_max_val = src_reg.s32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) u32_min_val = src_reg.u32_min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) u32_max_val = src_reg.u32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814) if (alu32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) src_known = tnum_subreg_is_const(src_reg.var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) if ((src_known &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) /* Taint dst register if offset had invalid bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820) * derived from e.g. dead branches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) __mark_reg_unknown(env, dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826) src_known = tnum_is_const(src_reg.var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827) if ((src_known &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) (smin_val != smax_val || umin_val != umax_val)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829) smin_val > smax_val || umin_val > umax_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) /* Taint dst register if offset had invalid bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831) * derived from e.g. dead branches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) __mark_reg_unknown(env, dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) if (!src_known &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) __mark_reg_unknown(env, dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) if (sanitize_needed(opcode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) ret = sanitize_val_alu(env, insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) return sanitize_err(env, insn, ret, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) * There are two classes of instructions: for the first class we track both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) * alu32 and alu64 sign/unsigned bounds independently; this provides the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) * greatest amount of precision when alu operations are mixed with jmp32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) * BPF_OR, and BPF_XOR. This is possible because these ops have fairly easy to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) * understand and calculate behavior in both 32-bit and 64-bit alu ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) * See alu32 verifier tests for examples. The second class of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858) * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however, are not so easy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859) * with regard to tracking sign/unsigned bounds because the bits may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860) * cross subreg boundaries in the alu64 case. When this happens we mark
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) * the reg unbounded in the subreg bound space and use the resulting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862) * tnum to calculate an approximation of the sign/unsigned bounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863) */
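	/* Illustrative example of the second class (values chosen purely for
	 * exposition): a 64-bit BPF_LSH by 32 turns a value known to live in
	 * the low subreg, e.g. 0xff, into 0xff00000000. The known bits have
	 * crossed into the upper half, so the 32-bit (subreg) bounds no longer
	 * describe the result; they are dropped and later re-derived from the
	 * shifted tnum.
	 */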
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) case BPF_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866) scalar32_min_max_add(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) scalar_min_max_add(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) case BPF_SUB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) scalar32_min_max_sub(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) scalar_min_max_sub(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) case BPF_MUL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) scalar32_min_max_mul(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) scalar_min_max_mul(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) case BPF_AND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) scalar32_min_max_and(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) scalar_min_max_and(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885) case BPF_OR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886) dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) scalar32_min_max_or(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) scalar_min_max_or(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890) case BPF_XOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) scalar32_min_max_xor(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893) scalar_min_max_xor(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) case BPF_LSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) if (umax_val >= insn_bitness) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897) /* Shifts greater than 31 or 63 are undefined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) * This includes shifts by a negative number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) mark_reg_unknown(env, regs, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) if (alu32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) scalar32_min_max_lsh(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) scalar_min_max_lsh(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) case BPF_RSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) if (umax_val >= insn_bitness) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910) /* Shifts greater than 31 or 63 are undefined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) * This includes shifts by a negative number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) mark_reg_unknown(env, regs, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) if (alu32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917) scalar32_min_max_rsh(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919) scalar_min_max_rsh(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921) case BPF_ARSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922) if (umax_val >= insn_bitness) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923) /* Shifts greater than 31 or 63 are undefined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) * This includes shifts by a negative number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) mark_reg_unknown(env, regs, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) if (alu32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) scalar32_min_max_arsh(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) scalar_min_max_arsh(dst_reg, &src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) mark_reg_unknown(env, regs, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) /* ALU32 ops are zero-extended into the 64-bit register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) if (alu32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) zext_32_to_64(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) __reg_deduce_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) __reg_bound_offset(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) }
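/* Worked example for the scalar/scalar path above (illustrative values only):
 * if dst_reg is known to lie in [2, 5] and src_reg in [1, 3], BPF_ADD narrows
 * dst_reg to [3, 8] in both the 32-bit and 64-bit bound sets, while tnum_add()
 * combines the operands' known-bits information; __update_reg_bounds() and
 * __reg_deduce_bounds() then reconcile the tnum with the min/max values.
 */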
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950) * and var_off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) */
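/* For orientation, a few illustrative dispatch cases (mirroring the checks
 * in the function body):
 *   r1 += r2, r1 pointer, r2 scalar  -> adjust_ptr_min_max_vals(ptr, off)
 *   r1 += r2, r1 scalar, r2 pointer  -> adjust_ptr_min_max_vals() with the
 *                                       src/dst roles reversed
 *   r1 -= r2, both pointers          -> scalar result, only permitted when
 *                                       allow_ptr_leaks is set
 *   r1 += r2, both scalars           -> adjust_scalar_min_max_vals()
 */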
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955) struct bpf_verifier_state *vstate = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) struct bpf_func_state *state = vstate->frame[vstate->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957) struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) u8 opcode = BPF_OP(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) dst_reg = &regs[insn->dst_reg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) src_reg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) if (dst_reg->type != SCALAR_VALUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) ptr_reg = dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) /* Make sure ID is cleared otherwise dst_reg min/max could be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968) * incorrectly propagated into other registers by find_equal_scalars()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) dst_reg->id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) if (BPF_SRC(insn->code) == BPF_X) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972) src_reg = &regs[insn->src_reg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) if (src_reg->type != SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) if (dst_reg->type != SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975) /* Combining two pointers by any ALU op yields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976) * an arbitrary scalar. Disallow all math except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977) * pointer subtraction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979) if (opcode == BPF_SUB && env->allow_ptr_leaks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) mark_reg_unknown(env, regs, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) verbose(env, "R%d pointer %s pointer prohibited\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985) bpf_alu_string[opcode >> 4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) /* scalar += pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) * This is legal, but we have to reverse our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) * src/dest handling in computing the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) err = mark_chain_precision(env, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) return adjust_ptr_min_max_vals(env, insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996) src_reg, dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) } else if (ptr_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) /* pointer += scalar */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000) err = mark_chain_precision(env, insn->src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) return adjust_ptr_min_max_vals(env, insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) dst_reg, src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) /* Pretend the src is a reg with a known value, since we only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) * need to be able to read from this state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) off_reg.type = SCALAR_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011) __mark_reg_known(&off_reg, insn->imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) src_reg = &off_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013) if (ptr_reg) /* pointer += K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014) return adjust_ptr_min_max_vals(env, insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) ptr_reg, src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) /* Got here implies adding two SCALAR_VALUEs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) if (WARN_ON_ONCE(ptr_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020) print_verifier_state(env, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) verbose(env, "verifier internal error: unexpected ptr_reg\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) if (WARN_ON(!src_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) print_verifier_state(env, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026) verbose(env, "verifier internal error: no src_reg\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032) /* check validity of 32-bit and 64-bit arithmetic operations */
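/* A few concrete rejections performed below (illustrative, not exhaustive):
 *   BPF_NEG with a nonzero src_reg, off or imm            -> "BPF_NEG uses reserved fields"
 *   BPF_END encoded as a BPF_ALU64 instruction            -> "BPF_END uses reserved fields"
 *   BPF_DIV/BPF_MOD by a zero immediate                   -> "div by zero"
 *   BPF_LSH/BPF_RSH/BPF_ARSH by an immediate >= the
 *   operand width (32 or 64)                              -> "invalid shift"
 */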
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035) struct bpf_reg_state *regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) u8 opcode = BPF_OP(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) if (opcode == BPF_END || opcode == BPF_NEG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) if (opcode == BPF_NEG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041) if (BPF_SRC(insn->code) != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) insn->src_reg != BPF_REG_0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) insn->off != 0 || insn->imm != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) verbose(env, "BPF_NEG uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050) BPF_CLASS(insn->code) == BPF_ALU64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) verbose(env, "BPF_END uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056) /* check src operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) err = check_reg_arg(env, insn->dst_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) if (is_pointer_value(env, insn->dst_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) verbose(env, "R%d pointer arithmetic prohibited\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) /* check dest operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) err = check_reg_arg(env, insn->dst_reg, DST_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) } else if (opcode == BPF_MOV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) if (BPF_SRC(insn->code) == BPF_X) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) if (insn->imm != 0 || insn->off != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) verbose(env, "BPF_MOV uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080) /* check src operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) err = check_reg_arg(env, insn->src_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) verbose(env, "BPF_MOV uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091) /* check dest operand, mark as required later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) if (BPF_SRC(insn->code) == BPF_X) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) struct bpf_reg_state *src_reg = regs + insn->src_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) if (BPF_CLASS(insn->code) == BPF_ALU64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) /* case: R1 = R2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102) * copy register state to dest reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) if (src_reg->type == SCALAR_VALUE && !src_reg->id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105) /* Assign src and dst registers the same ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) * that will be used by find_equal_scalars()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) * to propagate min/max range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) src_reg->id = ++env->id_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) *dst_reg = *src_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) dst_reg->live |= REG_LIVE_WRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) dst_reg->subreg_def = DEF_NOT_SUBREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) /* R1 = (u32) R2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115) if (is_pointer_value(env, insn->src_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) "R%d partial copy of pointer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) insn->src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120) } else if (src_reg->type == SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121) *dst_reg = *src_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122) /* Make sure ID is cleared otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) * dst_reg min/max could be incorrectly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124) * propagated into src_reg by find_equal_scalars()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) dst_reg->id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127) dst_reg->live |= REG_LIVE_WRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128) dst_reg->subreg_def = env->insn_idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130) mark_reg_unknown(env, regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133) zext_32_to_64(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136) __reg_deduce_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) __reg_bound_offset(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) /* case: R = imm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141) * remember the value we stored into this reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) /* clear any state __mark_reg_known doesn't set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144) mark_reg_unknown(env, regs, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145) regs[insn->dst_reg].type = SCALAR_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) if (BPF_CLASS(insn->code) == BPF_ALU64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147) __mark_reg_known(regs + insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148) insn->imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) __mark_reg_known(regs + insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) (u32)insn->imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152) }
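			/* E.g. (illustrative): "w1 = -1" (BPF_ALU | BPF_MOV | BPF_K)
			 * leaves r1 known equal to 0xffffffff (the immediate taken
			 * as u32), whereas "r1 = -1" (BPF_ALU64) leaves it known
			 * equal to -1.
			 */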
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155) } else if (opcode > BPF_END) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156) verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) } else { /* all other ALU ops: and, sub, xor, add, ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) if (BPF_SRC(insn->code) == BPF_X) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) if (insn->imm != 0 || insn->off != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163) verbose(env, "BPF_ALU uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166) /* check src1 operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167) err = check_reg_arg(env, insn->src_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172) verbose(env, "BPF_ALU uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177) /* check src2 operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) err = check_reg_arg(env, insn->dst_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182) if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184) verbose(env, "div by zero\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) if ((opcode == BPF_LSH || opcode == BPF_RSH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190) int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) if (insn->imm < 0 || insn->imm >= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193) verbose(env, "invalid shift %d\n", insn->imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198) /* check dest operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) return adjust_reg_min_max_vals(env, insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209) static void __find_good_pkt_pointers(struct bpf_func_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211) enum bpf_reg_type type, u16 new_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) struct bpf_reg_state *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) for (i = 0; i < MAX_BPF_REG; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) reg = &state->regs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218) if (reg->type == type && reg->id == dst_reg->id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) /* keep the maximum range already checked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) reg->range = max(reg->range, new_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223) bpf_for_each_spilled_reg(i, state, reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226) if (reg->type == type && reg->id == dst_reg->id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227) reg->range = max(reg->range, new_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) enum bpf_reg_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) bool range_right_open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) u16 new_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239) if (dst_reg->off < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240) (dst_reg->off == 0 && range_right_open))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) /* This doesn't give us any range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244) if (dst_reg->umax_value > MAX_PACKET_OFF ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245) dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) /* Risk of overflow. For instance, ptr + (1<<63) may be less
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247) * than pkt_end, but that's because it's also less than pkt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251) new_range = dst_reg->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) if (range_right_open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) new_range--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255) /* Examples for register markings:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257) * pkt_data in dst register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259) * r2 = r3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260) * r2 += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) * if (r2 > pkt_end) goto <handle exception>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262) * <access okay>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264) * r2 = r3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265) * r2 += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266) * if (r2 < pkt_end) goto <access okay>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267) * <handle exception>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) * Where:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) * r2 == dst_reg, pkt_end == src_reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) * r2=pkt(id=n,off=8,r=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) * r3=pkt(id=n,off=0,r=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) * pkt_data in src register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276) * r2 = r3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) * r2 += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278) * if (pkt_end >= r2) goto <access okay>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279) * <handle exception>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281) * r2 = r3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) * r2 += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) * if (pkt_end <= r2) goto <handle exception>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) * <access okay>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) * Where:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) * pkt_end == dst_reg, r2 == src_reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) * r2=pkt(id=n,off=8,r=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289) * r3=pkt(id=n,off=0,r=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293) * and [r3, r3 + 8-1) respectively is safe to access depending on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) * the check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297) /* If our ids match, then we must have the same max_value. And we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) * don't care about the other reg's fixed offset, since if it's too big
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) * the range won't allow anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) for (i = 0; i <= vstate->curframe; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) new_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) struct tnum subreg = tnum_subreg(reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) s32 sval = (s32)val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) case BPF_JEQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) if (tnum_is_const(subreg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) return !!tnum_equals_const(subreg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) case BPF_JNE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) if (tnum_is_const(subreg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) return !tnum_equals_const(subreg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) case BPF_JSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) if ((~subreg.mask & subreg.value) & val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) if (!((subreg.mask | subreg.value) & val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327) case BPF_JGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) if (reg->u32_min_value > val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) else if (reg->u32_max_value <= val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) case BPF_JSGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) if (reg->s32_min_value > sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) else if (reg->s32_max_value <= sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339) case BPF_JLT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340) if (reg->u32_max_value < val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) else if (reg->u32_min_value >= val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345) case BPF_JSLT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) if (reg->s32_max_value < sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348) else if (reg->s32_min_value >= sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351) case BPF_JGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) if (reg->u32_min_value >= val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) else if (reg->u32_max_value < val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) case BPF_JSGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) if (reg->s32_min_value >= sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360) else if (reg->s32_max_value < sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) case BPF_JLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364) if (reg->u32_max_value <= val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) else if (reg->u32_min_value > val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369) case BPF_JSLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370) if (reg->s32_max_value <= sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) else if (reg->s32_min_value > sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) s64 sval = (s64)val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386) case BPF_JEQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387) if (tnum_is_const(reg->var_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) return !!tnum_equals_const(reg->var_off, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) case BPF_JNE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391) if (tnum_is_const(reg->var_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) return !tnum_equals_const(reg->var_off, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) case BPF_JSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) if ((~reg->var_off.mask & reg->var_off.value) & val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) if (!((reg->var_off.mask | reg->var_off.value) & val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400) case BPF_JGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) if (reg->umin_value > val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) else if (reg->umax_value <= val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406) case BPF_JSGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407) if (reg->smin_value > sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) else if (reg->smax_value <= sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) case BPF_JLT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413) if (reg->umax_value < val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) else if (reg->umin_value >= val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) case BPF_JSLT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) if (reg->smax_value < sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421) else if (reg->smin_value >= sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) case BPF_JGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) if (reg->umin_value >= val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) else if (reg->umax_value < val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) case BPF_JSGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431) if (reg->smin_value >= sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) else if (reg->smax_value < sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436) case BPF_JLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) if (reg->umax_value <= val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439) else if (reg->umin_value > val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) case BPF_JSLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) if (reg->smax_value <= sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) else if (reg->smin_value > sval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) /* compute branch direction of the expression "if (reg opcode val) goto target;"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454) * and return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455) * 1 - branch will be taken and "goto target" will be executed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) * 0 - branch will not be taken and fall-through to next insn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) * value range is [0,10]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) */
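/* A slightly fuller worked example (illustrative): for a 64-bit BPF_JGT
 * against val == 5,
 *   reg in [6, 10] -> umin_value > val, branch always taken, return 1
 *   reg in [0, 5]  -> umax_value <= val, branch never taken, return 0
 *   reg in [0, 10] -> neither bound decides it, return -1
 */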
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) bool is_jmp32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463) if (__is_pointer_value(false, reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) if (!reg_type_not_null(reg->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467) /* If the pointer is valid (non-NULL), comparisons against zero have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) * known outcome, so we can use them to decide the branch directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) if (val != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) case BPF_JEQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) case BPF_JNE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) if (is_jmp32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484) return is_branch32_taken(reg, val, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) return is_branch64_taken(reg, val, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) /* Adjusts the register min/max values in the case that the dst_reg is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) * variable register that we are working on, and src_reg is a constant or we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) * simply doing a BPF_K check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491) * In JEQ/JNE cases we also adjust the var_off values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) */
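/* Two illustrative refinements performed below:
 *   "if (r1 > 7) goto l" with r1 in [0, 15]: on the taken path r1 is
 *   narrowed to [8, 15], on the fall-through path to [0, 7] (BPF_JGT).
 *   "if (r1 & 0x4) goto l": on the fall-through path bit 2 of r1 becomes
 *   known zero; on the taken path it becomes known one, since 0x4 is a
 *   power of two (BPF_JSET).
 */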
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493) static void reg_set_min_max(struct bpf_reg_state *true_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) struct bpf_reg_state *false_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) u64 val, u32 val32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) u8 opcode, bool is_jmp32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) struct tnum false_32off = tnum_subreg(false_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) struct tnum false_64off = false_reg->var_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) struct tnum true_32off = tnum_subreg(true_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) struct tnum true_64off = true_reg->var_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) s64 sval = (s64)val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) s32 sval32 = (s32)val32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) /* If the dst_reg is a pointer, we can't learn anything about its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) * variable offset from the compare (unless src_reg were a pointer into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) * the same object, but we don't bother with that).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508) * Since false_reg and true_reg have the same type by construction, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) * only need to check one of them for pointerness.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) if (__is_pointer_value(false, false_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) case BPF_JEQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) case BPF_JNE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518) struct bpf_reg_state *reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519) opcode == BPF_JEQ ? true_reg : false_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521) /* JEQ/JNE comparison doesn't change the register equivalence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522) * r1 = r2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523) * if (r1 == 42) goto label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524) * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525) * label: // here both r1 and r2 are known to be 42.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527) * Hence, when marking the register as known, preserve its ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) if (is_jmp32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) __mark_reg32_known(reg, val32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532) ___mark_reg_known(reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535) case BPF_JSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536) if (is_jmp32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537) false_32off = tnum_and(false_32off, tnum_const(~val32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538) if (is_power_of_2(val32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539) true_32off = tnum_or(true_32off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540) tnum_const(val32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) false_64off = tnum_and(false_64off, tnum_const(~val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543) if (is_power_of_2(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544) true_64off = tnum_or(true_64off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545) tnum_const(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548) case BPF_JGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549) case BPF_JGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551) if (is_jmp32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552) u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553) u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) false_reg->u32_max_value = min(false_reg->u32_max_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556) false_umax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) true_reg->u32_min_value = max(true_reg->u32_min_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) true_umin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560) u64 false_umax = opcode == BPF_JGT ? val : val - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561) u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) false_reg->umax_value = min(false_reg->umax_value, false_umax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564) true_reg->umin_value = max(true_reg->umin_value, true_umin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568) case BPF_JSGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569) case BPF_JSGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571) if (is_jmp32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572) s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573) s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575) false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578) s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579) s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7581) false_reg->smax_value = min(false_reg->smax_value, false_smax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7582) true_reg->smin_value = max(true_reg->smin_value, true_smin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7584) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586) case BPF_JLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7587) case BPF_JLT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7589) if (is_jmp32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7590) u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7591) u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593) false_reg->u32_min_value = max(false_reg->u32_min_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) false_umin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595) true_reg->u32_max_value = min(true_reg->u32_max_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) true_umax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7598) u64 false_umin = opcode == BPF_JLT ? val : val + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7599) u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7601) false_reg->umin_value = max(false_reg->umin_value, false_umin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7602) true_reg->umax_value = min(true_reg->umax_value, true_umax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7604) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7606) case BPF_JSLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7607) case BPF_JSLT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7609) if (is_jmp32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7610) s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7611) s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7613) false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7614) true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7615) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7616) s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7617) s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7619) false_reg->smin_value = max(false_reg->smin_value, false_smin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7620) true_reg->smax_value = min(true_reg->smax_value, true_smax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7622) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7624) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7625) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7628) if (is_jmp32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7629) false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7630) tnum_subreg(false_32off));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7631) true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7632) tnum_subreg(true_32off));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7633) __reg_combine_32_into_64(false_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7634) __reg_combine_32_into_64(true_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7635) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636) false_reg->var_off = false_64off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637) true_reg->var_off = true_64off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7638) __reg_combine_64_into_32(false_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7639) __reg_combine_64_into_32(true_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7643) /* Same as above, but for the case that dst_reg holds a constant and src_reg is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7644) * the variable reg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7645) */
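/* E.g. (illustrative): "if (5 > r1) goto l" is handled by flipping the
 * opcode below, so the bounds of r1 are refined exactly as for
 * "if (r1 < 5) goto l".
 */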
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7646) static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7647) struct bpf_reg_state *false_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7648) u64 val, u32 val32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7649) u8 opcode, bool is_jmp32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7651) /* How can we transform "a <op> b" into "b <op> a"? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7652) static const u8 opcode_flip[16] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7653) /* these stay the same */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7654) [BPF_JEQ >> 4] = BPF_JEQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7655) [BPF_JNE >> 4] = BPF_JNE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7656) [BPF_JSET >> 4] = BPF_JSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7657) /* these swap "lesser" and "greater" (L and G in the opcodes) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7658) [BPF_JGE >> 4] = BPF_JLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7659) [BPF_JGT >> 4] = BPF_JLT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7660) [BPF_JLE >> 4] = BPF_JGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7661) [BPF_JLT >> 4] = BPF_JGT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7662) [BPF_JSGE >> 4] = BPF_JSLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7663) [BPF_JSGT >> 4] = BPF_JSLT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7664) [BPF_JSLE >> 4] = BPF_JSGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7665) [BPF_JSLT >> 4] = BPF_JSGT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7666) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7667) opcode = opcode_flip[opcode >> 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7668) /* This uses zero as "not present in table"; luckily the zero opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7669) * BPF_JA, can't get here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7671) if (opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7672) reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7673) }
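/* For illustration: if the program tests "if r1 > r2 goto ..." and r1 is
 * the known constant (say 10), the table above rewrites BPF_JGT into
 * BPF_JLT so the usual reg_set_min_max() logic can run with the variable
 * register r2 treated as dst:
 *
 *	10 > r2   <==>   r2 < 10
 *
 * so the jump-taken state learns r2.umax_value = 9 and the fall-through
 * state learns r2.umin_value = 10.
 */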
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7675) /* Regs are known to be equal, so intersect their min/max/var_off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7676) static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7677) struct bpf_reg_state *dst_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7679) src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7680) dst_reg->umin_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7681) src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7682) dst_reg->umax_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7683) src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7684) dst_reg->smin_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7685) src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7686) dst_reg->smax_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7687) src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7688) dst_reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7689) /* We might have learned new bounds from the var_off. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7690) __update_reg_bounds(src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7691) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7692) /* We might have learned something about the sign bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7693) __reg_deduce_bounds(src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7694) __reg_deduce_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7695) /* We might have learned some bits from the bounds. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7696) __reg_bound_offset(src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7697) __reg_bound_offset(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7698) /* Intersecting with the old var_off might have improved our bounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7699) * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7700) * then new var_off is (0; 0x7f...fc) which improves our umax.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7701) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7702) __update_reg_bounds(src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7703) __update_reg_bounds(dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7706) static void reg_combine_min_max(struct bpf_reg_state *true_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7707) struct bpf_reg_state *true_dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7708) struct bpf_reg_state *false_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7709) struct bpf_reg_state *false_dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7710) u8 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7712) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7713) case BPF_JEQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7714) __reg_combine_min_max(true_src, true_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7715) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7716) case BPF_JNE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7717) __reg_combine_min_max(false_src, false_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7718) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7722) static void mark_ptr_or_null_reg(struct bpf_func_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7723) struct bpf_reg_state *reg, u32 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7724) bool is_null)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7726) if (reg_type_may_be_null(reg->type) && reg->id == id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7727) !WARN_ON_ONCE(!reg->id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7728) if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7729) !tnum_equals_const(reg->var_off, 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7730) reg->off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7731) /* Old offset (both fixed and variable parts) should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7732) * have been known-zero, because we don't allow pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7733) * arithmetic on pointers that might be NULL. If we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7734) * see this happening, don't convert the register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7736) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7738) if (is_null) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7739) reg->type = SCALAR_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7740) } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7741) const struct bpf_map *map = reg->map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7743) if (map->inner_map_meta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7744) reg->type = CONST_PTR_TO_MAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7745) reg->map_ptr = map->inner_map_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7746) } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7747) reg->type = PTR_TO_XDP_SOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7748) } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7749) map->map_type == BPF_MAP_TYPE_SOCKHASH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7750) reg->type = PTR_TO_SOCKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7751) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7752) reg->type = PTR_TO_MAP_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7754) } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7755) reg->type = PTR_TO_SOCKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7756) } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7757) reg->type = PTR_TO_SOCK_COMMON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7758) } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7759) reg->type = PTR_TO_TCP_SOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7760) } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7761) reg->type = PTR_TO_BTF_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7762) } else if (reg->type == PTR_TO_MEM_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7763) reg->type = PTR_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7764) } else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7765) reg->type = PTR_TO_RDONLY_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7766) } else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7767) reg->type = PTR_TO_RDWR_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7769) if (is_null) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7770) /* We don't need id and ref_obj_id from this point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7771) * onwards anymore; reset them so that state pruning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7772) * has a chance to take effect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7774) reg->id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7775) reg->ref_obj_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7776) } else if (!reg_may_point_to_spin_lock(reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7777) /* For not-NULL ptr, reg->ref_obj_id will be reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7778) * in release_reg_references().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7779) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7780) * reg->id is still used by spin_lock ptr. Other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7781) * than spin_lock ptr type, reg->id can be reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7783) reg->id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7786) }
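/* For illustration: the conversion above is what makes the canonical
 * map-lookup pattern verify:
 *
 *	v = bpf_map_lookup_elem(&map, &key);	// PTR_TO_MAP_VALUE_OR_NULL
 *	if (!v)
 *		return 0;	// NULL branch: v is marked SCALAR_VALUE
 *	*v += 1;		// here v is PTR_TO_MAP_VALUE, so the
 *				// dereference is accepted
 */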
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7788) static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7789) bool is_null)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7791) struct bpf_reg_state *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7792) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7794) for (i = 0; i < MAX_BPF_REG; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7795) mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7797) bpf_for_each_spilled_reg(i, state, reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7798) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7799) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7800) mark_ptr_or_null_reg(state, reg, id, is_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7804) /* The logic is similar to find_good_pkt_pointers(); the two could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7805) * eventually be folded together at some point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7807) static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7808) bool is_null)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7810) struct bpf_func_state *state = vstate->frame[vstate->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7811) struct bpf_reg_state *regs = state->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7812) u32 ref_obj_id = regs[regno].ref_obj_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7813) u32 id = regs[regno].id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7814) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7816) if (ref_obj_id && ref_obj_id == id && is_null)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7817) /* regs[regno] is in the " == NULL" branch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7818) * No one could have freed the reference state before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7819) * doing the NULL check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7821) WARN_ON_ONCE(release_reference_state(state, id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7823) for (i = 0; i <= vstate->curframe; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7824) __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7827) static bool try_match_pkt_pointers(const struct bpf_insn *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7828) struct bpf_reg_state *dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7829) struct bpf_reg_state *src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7830) struct bpf_verifier_state *this_branch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7831) struct bpf_verifier_state *other_branch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7833) if (BPF_SRC(insn->code) != BPF_X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7834) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7836) /* Pointers are always 64-bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7837) if (BPF_CLASS(insn->code) == BPF_JMP32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7838) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7840) switch (BPF_OP(insn->code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7841) case BPF_JGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7842) if ((dst_reg->type == PTR_TO_PACKET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7843) src_reg->type == PTR_TO_PACKET_END) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7844) (dst_reg->type == PTR_TO_PACKET_META &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7845) reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7846) /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7847) find_good_pkt_pointers(this_branch, dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7848) dst_reg->type, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7849) } else if ((dst_reg->type == PTR_TO_PACKET_END &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7850) src_reg->type == PTR_TO_PACKET) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7851) (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7852) src_reg->type == PTR_TO_PACKET_META)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7853) /* pkt_end > pkt_data', pkt_data > pkt_meta' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7854) find_good_pkt_pointers(other_branch, src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7855) src_reg->type, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7856) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7857) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7859) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7860) case BPF_JLT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7861) if ((dst_reg->type == PTR_TO_PACKET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7862) src_reg->type == PTR_TO_PACKET_END) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7863) (dst_reg->type == PTR_TO_PACKET_META &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7864) reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7865) /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7866) find_good_pkt_pointers(other_branch, dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7867) dst_reg->type, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7868) } else if ((dst_reg->type == PTR_TO_PACKET_END &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7869) src_reg->type == PTR_TO_PACKET) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7870) (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7871) src_reg->type == PTR_TO_PACKET_META)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7872) /* pkt_end < pkt_data', pkt_data < pkt_meta' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7873) find_good_pkt_pointers(this_branch, src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7874) src_reg->type, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7875) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7876) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7878) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7879) case BPF_JGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7880) if ((dst_reg->type == PTR_TO_PACKET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7881) src_reg->type == PTR_TO_PACKET_END) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7882) (dst_reg->type == PTR_TO_PACKET_META &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7883) reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7884) /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7885) find_good_pkt_pointers(this_branch, dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7886) dst_reg->type, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7887) } else if ((dst_reg->type == PTR_TO_PACKET_END &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7888) src_reg->type == PTR_TO_PACKET) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7889) (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7890) src_reg->type == PTR_TO_PACKET_META)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7891) /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7892) find_good_pkt_pointers(other_branch, src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7893) src_reg->type, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7894) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7895) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7897) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7898) case BPF_JLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7899) if ((dst_reg->type == PTR_TO_PACKET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7900) src_reg->type == PTR_TO_PACKET_END) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7901) (dst_reg->type == PTR_TO_PACKET_META &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7902) reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7903) /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7904) find_good_pkt_pointers(other_branch, dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7905) dst_reg->type, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7906) } else if ((dst_reg->type == PTR_TO_PACKET_END &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7907) src_reg->type == PTR_TO_PACKET) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7908) (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7909) src_reg->type == PTR_TO_PACKET_META)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7910) /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7911) find_good_pkt_pointers(this_branch, src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7912) src_reg->type, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7913) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7914) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7916) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7917) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7918) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7921) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7922) }
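/* For illustration: the cases above recognize the usual bounds check for
 * direct packet access in XDP/tc programs, e.g.
 *
 *	void *data = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)
 *		return XDP_DROP;
 *	// fall-through: pkt_data' > pkt_end is known false, so
 *	// find_good_pkt_pointers() records the verified range and
 *	// reads within sizeof(*eth) of data are allowed
 */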
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7924) static void find_equal_scalars(struct bpf_verifier_state *vstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7925) struct bpf_reg_state *known_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7927) struct bpf_func_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7928) struct bpf_reg_state *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7929) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7931) for (i = 0; i <= vstate->curframe; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7932) state = vstate->frame[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7933) for (j = 0; j < MAX_BPF_REG; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7934) reg = &state->regs[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7935) if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7936) *reg = *known_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7939) bpf_for_each_spilled_reg(j, state, reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7940) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7941) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7942) if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7943) *reg = *known_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7946) }
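/* For illustration: scalar copies share an id, so bounds learned on one
 * copy are propagated to the others by find_equal_scalars(), e.g.
 *
 *	r2 = r1;		// r1 and r2 now carry the same id
 *	if (r2 > 8) goto out;
 *	// fall-through: r2.umax_value = 8, and the same bounds are
 *	// copied to r1, so r1 can also be used as a bounded offset
 */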
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7948) static int check_cond_jmp_op(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7949) struct bpf_insn *insn, int *insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7951) struct bpf_verifier_state *this_branch = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7952) struct bpf_verifier_state *other_branch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7953) struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7954) struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7955) u8 opcode = BPF_OP(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7956) bool is_jmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7957) int pred = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7958) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7960) /* Only conditional jumps are expected to reach here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7961) if (opcode == BPF_JA || opcode > BPF_JSLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7962) verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7963) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7966) if (BPF_SRC(insn->code) == BPF_X) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7967) if (insn->imm != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7968) verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7969) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7972) /* check src1 operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7973) err = check_reg_arg(env, insn->src_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7974) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7975) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7977) if (is_pointer_value(env, insn->src_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7978) verbose(env, "R%d pointer comparison prohibited\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7979) insn->src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7980) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7982) src_reg = &regs[insn->src_reg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7983) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7984) if (insn->src_reg != BPF_REG_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7985) verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7986) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7990) /* check src2 operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7991) err = check_reg_arg(env, insn->dst_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7992) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7993) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7995) dst_reg = &regs[insn->dst_reg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7996) is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7998) if (BPF_SRC(insn->code) == BPF_K) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7999) pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8000) } else if (src_reg->type == SCALAR_VALUE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8001) is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8002) pred = is_branch_taken(dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8003) tnum_subreg(src_reg->var_off).value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8004) opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8005) is_jmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8006) } else if (src_reg->type == SCALAR_VALUE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8007) !is_jmp32 && tnum_is_const(src_reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8008) pred = is_branch_taken(dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8009) src_reg->var_off.value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8010) opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8011) is_jmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8014) if (pred >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8015) /* If we get here with a dst_reg pointer type, it is because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8016) * is_branch_taken() above special-cased the comparison with 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8018) if (!__is_pointer_value(false, dst_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8019) err = mark_chain_precision(env, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8020) if (BPF_SRC(insn->code) == BPF_X && !err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8021) err = mark_chain_precision(env, insn->src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8022) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8023) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8026) if (pred == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8027) /* Only follow the goto, ignore fall-through. If needed, push
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8028) * the fall-through branch for simulation under speculative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8029) * execution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8031) if (!env->bypass_spec_v1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8032) !sanitize_speculative_path(env, insn, *insn_idx + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8033) *insn_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8034) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8035) *insn_idx += insn->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8036) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8037) } else if (pred == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8038) /* Only follow the fall-through branch, since that's where the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8039) * program will go. If needed, push the goto branch for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8040) * simulation under speculative execution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8042) if (!env->bypass_spec_v1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8043) !sanitize_speculative_path(env, insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8044) *insn_idx + insn->off + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8045) *insn_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8046) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8047) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8050) other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8051) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8052) if (!other_branch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8053) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8054) other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8056) /* detect if we are comparing against a constant value so we can adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8057) * our min/max values for our dst register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8058) * this is only legit if both are scalars (or pointers to the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8059) * object, I suppose, but we don't support that right now), because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8060) * otherwise the different base pointers mean the offsets aren't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8061) * comparable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8063) if (BPF_SRC(insn->code) == BPF_X) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8064) struct bpf_reg_state *src_reg = &regs[insn->src_reg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8066) if (dst_reg->type == SCALAR_VALUE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8067) src_reg->type == SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8068) if (tnum_is_const(src_reg->var_off) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8069) (is_jmp32 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8070) tnum_is_const(tnum_subreg(src_reg->var_off))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8071) reg_set_min_max(&other_branch_regs[insn->dst_reg],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8072) dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8073) src_reg->var_off.value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8074) tnum_subreg(src_reg->var_off).value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8075) opcode, is_jmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8076) else if (tnum_is_const(dst_reg->var_off) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8077) (is_jmp32 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8078) tnum_is_const(tnum_subreg(dst_reg->var_off))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8079) reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8080) src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8081) dst_reg->var_off.value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8082) tnum_subreg(dst_reg->var_off).value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8083) opcode, is_jmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8084) else if (!is_jmp32 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8085) (opcode == BPF_JEQ || opcode == BPF_JNE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8086) /* Comparing for equality, we can combine knowledge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8087) reg_combine_min_max(&other_branch_regs[insn->src_reg],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8088) &other_branch_regs[insn->dst_reg],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8089) src_reg, dst_reg, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8090) if (src_reg->id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8091) !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8092) find_equal_scalars(this_branch, src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8093) find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8097) } else if (dst_reg->type == SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8098) reg_set_min_max(&other_branch_regs[insn->dst_reg],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8099) dst_reg, insn->imm, (u32)insn->imm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8100) opcode, is_jmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8103) if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8104) !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8105) find_equal_scalars(this_branch, dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8106) find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8109) /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8110) * NOTE: the optimizations below relate to pointer comparisons,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8111) * which are never JMP32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8113) if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8114) insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8115) reg_type_may_be_null(dst_reg->type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8116) /* Mark all identical registers in each branch as either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8117) * safe or unknown, depending on the R == 0 or R != 0 conditional.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8119) mark_ptr_or_null_regs(this_branch, insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8120) opcode == BPF_JNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8121) mark_ptr_or_null_regs(other_branch, insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8122) opcode == BPF_JEQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8123) } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8124) this_branch, other_branch) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8125) is_pointer_value(env, insn->dst_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8126) verbose(env, "R%d pointer comparison prohibited\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8127) insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8128) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8130) if (env->log.level & BPF_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8131) print_verifier_state(env, this_branch->frame[this_branch->curframe]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8132) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8133) }
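/* For illustration: a typical bounds refinement performed above for a
 * comparison against an immediate:
 *
 *	r0 = <unknown scalar>
 *	if (r0 > 100) goto out;	// BPF_JGT, BPF_K, imm = 100
 *	// fall-through state: r0.umax_value = 100
 *	// jump-taken state:   r0.umin_value = 101
 */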
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8135) /* verify BPF_LD_IMM64 instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8136) static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8138) struct bpf_insn_aux_data *aux = cur_aux(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8139) struct bpf_reg_state *regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8140) struct bpf_reg_state *dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8141) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8142) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8144) if (BPF_SIZE(insn->code) != BPF_DW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8145) verbose(env, "invalid BPF_LD_IMM insn\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8146) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8148) if (insn->off != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8149) verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8150) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8153) err = check_reg_arg(env, insn->dst_reg, DST_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8154) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8155) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8157) dst_reg = &regs[insn->dst_reg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8158) if (insn->src_reg == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8159) u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8161) dst_reg->type = SCALAR_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8162) __mark_reg_known(&regs[insn->dst_reg], imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8163) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8166) if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8167) mark_reg_known_zero(env, regs, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8169) dst_reg->type = aux->btf_var.reg_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8170) switch (dst_reg->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8171) case PTR_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8172) dst_reg->mem_size = aux->btf_var.mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8173) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8174) case PTR_TO_BTF_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8175) case PTR_TO_PERCPU_BTF_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8176) dst_reg->btf_id = aux->btf_var.btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8178) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8179) verbose(env, "bpf verifier is misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8180) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8182) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8185) map = env->used_maps[aux->map_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8186) mark_reg_known_zero(env, regs, insn->dst_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8187) dst_reg->map_ptr = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8189) if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8190) dst_reg->type = PTR_TO_MAP_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8191) dst_reg->off = aux->map_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8192) if (map_value_has_spin_lock(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8193) dst_reg->id = ++env->id_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8194) } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8195) dst_reg->type = CONST_PTR_TO_MAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8196) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8197) verbose(env, "bpf verifier is misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8198) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8201) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8204) static bool may_access_skb(enum bpf_prog_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8206) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8207) case BPF_PROG_TYPE_SOCKET_FILTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8208) case BPF_PROG_TYPE_SCHED_CLS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8209) case BPF_PROG_TYPE_SCHED_ACT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8210) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8211) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8212) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8216) /* verify safety of LD_ABS|LD_IND instructions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8217) * - they can only appear in programs where ctx == skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8218) * - since they are wrappers of function calls, they scratch R1-R5 registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8219) *   preserve R6-R9, and store the return value into R0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8220) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8221) * Implicit input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8222) * ctx == skb == R6 == CTX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8224) * Explicit input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8225) * SRC == any register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8226) * IMM == 32-bit immediate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8228) * Output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8229) * R0 - 8/16/32-bit skb data converted to cpu endianness
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8230) */
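/* For illustration: these are the eBPF counterparts of the classic cBPF
 * packet loads, e.g. loading the EtherType of an skb:
 *
 *	BPF_LD_ABS(BPF_H, 12)	// R0 = 16-bit value at skb->data + 12,
 *				// converted to cpu endianness
 *
 * which is why R6 must hold the ctx (skb) when such an insn executes.
 */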
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8231) static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8233) struct bpf_reg_state *regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8234) static const int ctx_reg = BPF_REG_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8235) u8 mode = BPF_MODE(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8236) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8238) if (!may_access_skb(resolve_prog_type(env->prog))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8239) verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8240) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8243) if (!env->ops->gen_ld_abs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8244) verbose(env, "bpf verifier is misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8245) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8248) if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8249) BPF_SIZE(insn->code) == BPF_DW ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8250) (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8251) verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8252) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8255) /* check whether implicit source operand (register R6) is readable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8256) err = check_reg_arg(env, ctx_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8257) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8258) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8260) /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8261) * gen_ld_abs() may terminate the program at runtime, leading to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8262) * gen_ld_abs() may terminate the program at runtime, leading to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8262) * reference leak.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8264) err = check_reference_leak(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8265) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8266) verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8267) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8270) if (env->cur_state->active_spin_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8271) verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8272) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8275) if (regs[ctx_reg].type != PTR_TO_CTX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8276) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8277) "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8278) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8281) if (mode == BPF_IND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8282) /* check explicit source operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8283) err = check_reg_arg(env, insn->src_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8284) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8285) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8288) err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8289) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8290) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8292) /* reset caller saved regs to unreadable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8293) for (i = 0; i < CALLER_SAVED_REGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8294) mark_reg_not_init(env, regs, caller_saved[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8295) check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8298) /* mark destination R0 register as readable, since it contains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8299) * the value fetched from the packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8300) * Already marked as written above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8302) mark_reg_unknown(env, regs, BPF_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8303) /* ld_abs loads up to 32 bits of skb data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8304) regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8305) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8308) static int check_return_code(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8310) struct tnum enforce_attach_type_range = tnum_unknown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8311) const struct bpf_prog *prog = env->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8312) struct bpf_reg_state *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8313) struct tnum range = tnum_range(0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8314) enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8315) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8316) const bool is_subprog = env->cur_state->frame[0]->subprogno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8318) /* LSM and struct_ops func-ptr's return type could be "void" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8319) if (!is_subprog &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8320) (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8321) prog_type == BPF_PROG_TYPE_LSM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8322) !prog->aux->attach_func_proto->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8325) /* The eBPF calling convention is such that R0 is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8326) * to return the value from an eBPF program.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8327) * Make sure that it is readable at this point, i.e. at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8328) * bpf_exit, which means that the program wrote
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8329) * something into it earlier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8331) err = check_reg_arg(env, BPF_REG_0, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8332) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8333) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8335) if (is_pointer_value(env, BPF_REG_0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8336) verbose(env, "R0 leaks addr as return value\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8337) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8340) reg = cur_regs(env) + BPF_REG_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8341) if (is_subprog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8342) if (reg->type != SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8343) verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8344) reg_type_str[reg->type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8345) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8350) switch (prog_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8351) case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8352) if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8353) env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8354) env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8355) env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8356) env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8357) env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8358) range = tnum_range(1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8359) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8360) case BPF_PROG_TYPE_CGROUP_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8361) if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8362) range = tnum_range(0, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8363) enforce_attach_type_range = tnum_range(2, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8365) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8366) case BPF_PROG_TYPE_CGROUP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8367) case BPF_PROG_TYPE_SOCK_OPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8368) case BPF_PROG_TYPE_CGROUP_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8369) case BPF_PROG_TYPE_CGROUP_SYSCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8370) case BPF_PROG_TYPE_CGROUP_SOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8371) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8372) case BPF_PROG_TYPE_RAW_TRACEPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8373) if (!env->prog->aux->attach_btf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8374) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8375) range = tnum_const(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8376) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8377) case BPF_PROG_TYPE_TRACING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8378) switch (env->prog->expected_attach_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8379) case BPF_TRACE_FENTRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8380) case BPF_TRACE_FEXIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8381) range = tnum_const(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8382) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8383) case BPF_TRACE_RAW_TP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8384) case BPF_MODIFY_RETURN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8385) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8386) case BPF_TRACE_ITER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8388) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8389) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8391) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8392) case BPF_PROG_TYPE_SK_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8393) range = tnum_range(SK_DROP, SK_PASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8394) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8395) case BPF_PROG_TYPE_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8396) /* an freplace program can return anything, as its return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8397) * depends on the to-be-replaced kernel func or bpf program.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8399) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8400) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8403) if (reg->type != SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8404) verbose(env, "At program exit the register R0 is not a known value (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8405) reg_type_str[reg->type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8406) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8409) if (!tnum_in(range, reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8410) char tn_buf[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8412) verbose(env, "At program exit the register R0 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8413) if (!tnum_is_unknown(reg->var_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8414) tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8415) verbose(env, "has value %s", tn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8416) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8417) verbose(env, "has unknown scalar value");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8419) tnum_strn(tn_buf, sizeof(tn_buf), range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8420) verbose(env, " should have been in %s\n", tn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8421) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8424) if (!tnum_is_unknown(enforce_attach_type_range) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8425) tnum_in(enforce_attach_type_range, reg->var_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8426) env->prog->enforce_expected_attach_type = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8427) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8428) }
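
/* Worked example: for BPF_CGROUP_INET_EGRESS the allowed range above is
 * tnum_range(0, 3), which describes the value set {0, 1, 2, 3}, and
 * tnum_in(range, reg->var_off) accepts R0 only if every value R0 may hold
 * lies in that set - a constant 2 passes, a constant 4 does not.  If R0 is
 * additionally provably 2 or 3 (i.e. inside tnum_range(2, 3)),
 * enforce_expected_attach_type is set so the expected attach type can be
 * enforced at attach time.
 */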
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8430) /* non-recursive DFS pseudo code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8431) * 1 procedure DFS-iterative(G,v):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8432) * 2 label v as discovered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8433) * 3 let S be a stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8434) * 4 S.push(v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8435) * 5 while S is not empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8436) * 6 t <- S.pop()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8437) * 7 if t is what we're looking for:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8438) * 8 return t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8439) * 9 for all edges e in G.adjacentEdges(t) do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8440) * 10 if edge e is already labelled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8441) * 11 continue with the next edge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8442) * 12 w <- G.adjacentVertex(t,e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8443) * 13 if vertex w is not discovered and not explored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8444) * 14 label e as tree-edge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8445) * 15 label w as discovered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8446) * 16 S.push(w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8447) * 17 continue at 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8448) * 18 else if vertex w is discovered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8449) * 19 label e as back-edge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8450) * 20 else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8451) * 21 // vertex w is explored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8452) * 22 label e as forward- or cross-edge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8453) * 23 label t as explored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8454) * 24 S.pop()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8455) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8456) * convention:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8457) * 0x10 - discovered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8458) * 0x11 - discovered and fall-through edge labelled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8459) * 0x12 - discovered and fall-through and branch edges labelled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8460) * 0x20 - explored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8463) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8464) DISCOVERED = 0x10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8465) EXPLORED = 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8466) FALLTHROUGH = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8467) BRANCH = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8468) };
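
/* With these values DISCOVERED | FALLTHROUGH == 0x11 and
 * DISCOVERED | BRANCH == 0x12, matching the convention above: the high
 * nibble holds the discovered/explored state (hence the & 0xF0 test in
 * push_insn()) and the low nibble records how far the outgoing edges of a
 * discovered insn have been labelled, fall-through first, then branch.
 */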
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8470) static u32 state_htab_size(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8472) return env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8473) }
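
/* explored_state() below picks the list head for a verifier state by
 * hashing the instruction index with the callsite of the current frame,
 * modulo state_htab_size().  Sketch with made-up numbers, for illustration
 * only:
 *
 *	insn_idx = 17, state->callsite = 42
 *	head = &env->explored_states[(17 ^ 42) % env->prog->len];
 *
 * Mixing in the callsite tends to keep states reached at the same insn via
 * different call chains in different buckets, which keeps the per-bucket
 * search in is_state_visited() shorter.
 */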
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8475) static struct bpf_verifier_state_list **explored_state(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8476) struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8477) int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8479) struct bpf_verifier_state *cur = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8480) struct bpf_func_state *state = cur->frame[cur->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8482) return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8485) static void init_explored_state(struct bpf_verifier_env *env, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8487) env->insn_aux_data[idx].prune_point = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8490) /* t, w, e - match pseudo-code above:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8491) * t - index of current instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8492) * w - next instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8493) * e - edge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8494) */
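/* Return convention used by the caller (check_cfg()):
 *   1  - w was newly discovered and pushed on the DFS stack
 *   0  - nothing to do for this edge (already labelled, or a back-edge
 *        tolerated because loop_ok && env->bpf_capable)
 *  <0  - error: out-of-range jump, DFS stack overflow, forbidden back-edge
 *        or internal inconsistency
 */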
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8495) static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8496) bool loop_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8498) int *insn_stack = env->cfg.insn_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8499) int *insn_state = env->cfg.insn_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8501) if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8502) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8504) if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8507) if (w < 0 || w >= env->prog->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8508) verbose_linfo(env, t, "%d: ", t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8509) verbose(env, "jump out of range from insn %d to %d\n", t, w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8510) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8513) if (e == BRANCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8514) /* mark branch target for state pruning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8515) init_explored_state(env, w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8517) if (insn_state[w] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8518) /* tree-edge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8519) insn_state[t] = DISCOVERED | e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8520) insn_state[w] = DISCOVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8521) if (env->cfg.cur_stack >= env->prog->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8522) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8523) insn_stack[env->cfg.cur_stack++] = w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8524) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8525) } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8526) if (loop_ok && env->bpf_capable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8527) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8528) verbose_linfo(env, t, "%d: ", t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8529) verbose_linfo(env, w, "%d: ", w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8530) verbose(env, "back-edge from insn %d to %d\n", t, w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8531) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8532) } else if (insn_state[w] == EXPLORED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8533) /* forward- or cross-edge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8534) insn_state[t] = DISCOVERED | e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8535) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8536) verbose(env, "insn state internal bug\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8537) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8539) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8541)
/* non-recursive depth-first-search to detect loops in a BPF program;
 * a loop shows up as a back-edge in the directed graph
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8545) static int check_cfg(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8547) struct bpf_insn *insns = env->prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8548) int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8549) int *insn_stack, *insn_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8550) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8551) int i, t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8553) insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8554) if (!insn_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8555) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8557) insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8558) if (!insn_stack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8559) kvfree(insn_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8560) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8563) insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8564) insn_stack[0] = 0; /* 0 is the first instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8565) env->cfg.cur_stack = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8567) peek_stack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8568) if (env->cfg.cur_stack == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8569) goto check_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8570) t = insn_stack[env->cfg.cur_stack - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8572) if (BPF_CLASS(insns[t].code) == BPF_JMP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8573) BPF_CLASS(insns[t].code) == BPF_JMP32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8574) u8 opcode = BPF_OP(insns[t].code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8576) if (opcode == BPF_EXIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8577) goto mark_explored;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8578) } else if (opcode == BPF_CALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8579) ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8580) if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8581) goto peek_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8582) else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8583) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8584) if (t + 1 < insn_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8585) init_explored_state(env, t + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8586) if (insns[t].src_reg == BPF_PSEUDO_CALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8587) init_explored_state(env, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8588) ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8589) env, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8590) if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8591) goto peek_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8592) else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8593) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8595) } else if (opcode == BPF_JA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8596) if (BPF_SRC(insns[t].code) != BPF_K) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8597) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8598) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8600) /* unconditional jump with single edge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8601) ret = push_insn(t, t + insns[t].off + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8602) FALLTHROUGH, env, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8603) if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8604) goto peek_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8605) else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8606) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8607) /* unconditional jmp is not a good pruning point,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8608) * but it's marked, since backtracking needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8609) * to record jmp history in is_state_visited().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8610) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8611) init_explored_state(env, t + insns[t].off + 1);
/* tell the verifier to check for equivalent states
 * after every call and jump
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8615) if (t + 1 < insn_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8616) init_explored_state(env, t + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8617) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8618) /* conditional jump with two edges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8619) init_explored_state(env, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8620) ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8621) if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8622) goto peek_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8623) else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8624) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8626) ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8627) if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8628) goto peek_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8629) else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8630) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8632) } else {
/* all other non-branch instructions with a single
 * fall-through edge
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8636) ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8637) if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8638) goto peek_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8639) else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8640) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8643) mark_explored:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8644) insn_state[t] = EXPLORED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8645) if (env->cfg.cur_stack-- <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8646) verbose(env, "pop stack internal bug\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8647) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8648) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8650) goto peek_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8652) check_state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8653) for (i = 0; i < insn_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8654) if (insn_state[i] != EXPLORED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8655) verbose(env, "unreachable insn %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8656) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8657) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8660) ret = 0; /* cfg looks good */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8662) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8663) kvfree(insn_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8664) kvfree(insn_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8665) env->cfg.insn_state = env->cfg.insn_stack = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8666) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8667) }
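
/* check_abnormal_return() rejects LD_ABS and tail calls in subprograms when
 * no BTF func info was supplied: without BTF the verifier cannot prove that
 * a subprog returns a plain 'int', which is what these constructs require
 * (see the equivalent per-subprog checks in check_btf_func() below).
 */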
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8669) static int check_abnormal_return(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8671) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8673) for (i = 1; i < env->subprog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8674) if (env->subprog_info[i].has_ld_abs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8675) verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8676) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8678) if (env->subprog_info[i].has_tail_call) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8679) verbose(env, "tail_call is not allowed in subprogs without BTF\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8680) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8683) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8686) /* The minimum supported BTF func info size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8687) #define MIN_BPF_FUNCINFO_SIZE 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8688) #define MAX_FUNCINFO_REC_SIZE 252
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8690) static int check_btf_func(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8691) const union bpf_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8692) union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8694) const struct btf_type *type, *func_proto, *ret_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8695) u32 i, nfuncs, urec_size, min_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8696) u32 krec_size = sizeof(struct bpf_func_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8697) struct bpf_func_info *krecord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8698) struct bpf_func_info_aux *info_aux = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8699) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8700) const struct btf *btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8701) void __user *urecord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8702) u32 prev_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8703) bool scalar_return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8704) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8706) nfuncs = attr->func_info_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8707) if (!nfuncs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8708) if (check_abnormal_return(env))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8709) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8710) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8713) if (nfuncs != env->subprog_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8714) verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8715) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8718) urec_size = attr->func_info_rec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8719) if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8720) urec_size > MAX_FUNCINFO_REC_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8721) urec_size % sizeof(u32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8722) verbose(env, "invalid func info rec size %u\n", urec_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8723) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8726) prog = env->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8727) btf = prog->aux->btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8729) urecord = u64_to_user_ptr(attr->func_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8730) min_size = min_t(u32, krec_size, urec_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8732) krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8733) if (!krecord)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8734) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8735) info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8736) if (!info_aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8737) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8739) for (i = 0; i < nfuncs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8740) ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8741) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8742) if (ret == -E2BIG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8743) verbose(env, "nonzero tailing record in func info");
/* set the size the kernel expects so the loader can
 * zero out the rest of the record.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8747) if (put_user(min_size, &uattr->func_info_rec_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8748) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8750) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8753) if (copy_from_user(&krecord[i], urecord, min_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8754) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8755) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8758) /* check insn_off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8759) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8760) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8761) if (krecord[i].insn_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8762) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8763) "nonzero insn_off %u for the first func info record",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8764) krecord[i].insn_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8765) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8767) } else if (krecord[i].insn_off <= prev_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8768) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8769) "same or smaller insn offset (%u) than previous func info record (%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8770) krecord[i].insn_off, prev_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8771) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8774) if (env->subprog_info[i].start != krecord[i].insn_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8775) verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8776) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8779) /* check type_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8780) type = btf_type_by_id(btf, krecord[i].type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8781) if (!type || !btf_type_is_func(type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8782) verbose(env, "invalid type id %d in func info",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8783) krecord[i].type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8784) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8786) info_aux[i].linkage = BTF_INFO_VLEN(type->info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8788) func_proto = btf_type_by_id(btf, type->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8789) if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8790) /* btf_func_check() already verified it during BTF load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8791) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8792) ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8793) scalar_return =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8794) btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8795) if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8796) verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8797) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8799) if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8800) verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8801) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8804) prev_offset = krecord[i].insn_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8805) urecord += urec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8808) prog->aux->func_info = krecord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8809) prog->aux->func_info_cnt = nfuncs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8810) prog->aux->func_info_aux = info_aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8811) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8813) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8814) kvfree(krecord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8815) kfree(info_aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8816) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8817) }
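
/* adjust_btf_func() resyncs each func_info record's insn_off with the
 * current subprog_info[].start, presumably so that the BTF func info keeps
 * matching the program layout after later verifier passes have patched or
 * moved instructions.
 */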
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8819) static void adjust_btf_func(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8821) struct bpf_prog_aux *aux = env->prog->aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8822) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8824) if (!aux->func_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8825) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8827) for (i = 0; i < env->subprog_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8828) aux->func_info[i].insn_off = env->subprog_info[i].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8831) #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8832) sizeof(((struct bpf_line_info *)(0))->line_col))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8833) #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8835) static int check_btf_line(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8836) const union bpf_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8837) union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8839) u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8840) struct bpf_subprog_info *sub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8841) struct bpf_line_info *linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8842) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8843) const struct btf *btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8844) void __user *ulinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8845) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8847) nr_linfo = attr->line_info_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8848) if (!nr_linfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8849) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8850) if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8851) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8853) rec_size = attr->line_info_rec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8854) if (rec_size < MIN_BPF_LINEINFO_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8855) rec_size > MAX_LINEINFO_REC_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8856) rec_size & (sizeof(u32) - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8857) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8858)
/* Need to zero it in case userspace passes in a
 * smaller bpf_line_info object.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8862) linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8863) GFP_KERNEL | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8864) if (!linfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8865) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8867) prog = env->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8868) btf = prog->aux->btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8870) s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8871) sub = env->subprog_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8872) ulinfo = u64_to_user_ptr(attr->line_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8873) expected_size = sizeof(struct bpf_line_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8874) ncopy = min_t(u32, expected_size, rec_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8875) for (i = 0; i < nr_linfo; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8876) err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8877) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8878) if (err == -E2BIG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8879) verbose(env, "nonzero tailing record in line_info");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8880) if (put_user(expected_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8881) &uattr->line_info_rec_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8882) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8884) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8887) if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8888) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8889) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8893) * Check insn_off to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8894) * 1) strictly increasing AND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8895) * 2) bounded by prog->len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8896) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8897) * The linfo[0].insn_off == 0 check logically falls into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8898) * the later "missing bpf_line_info for func..." case
 * because the first linfo[0].insn_off must also be the
 * start of the first subprog, and the first subprog must
 * have subprog_info[0].start == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8903) if ((i && linfo[i].insn_off <= prev_offset) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8904) linfo[i].insn_off >= prog->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8905) verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8906) i, linfo[i].insn_off, prev_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8907) prog->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8908) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8909) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8912) if (!prog->insnsi[linfo[i].insn_off].code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8913) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8914) "Invalid insn code at line_info[%u].insn_off\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8915) i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8916) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8917) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8920) if (!btf_name_by_offset(btf, linfo[i].line_off) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8921) !btf_name_by_offset(btf, linfo[i].file_name_off)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8922) verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8923) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8924) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8927) if (s != env->subprog_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8928) if (linfo[i].insn_off == sub[s].start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8929) sub[s].linfo_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8930) s++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8931) } else if (sub[s].start < linfo[i].insn_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8932) verbose(env, "missing bpf_line_info for func#%u\n", s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8933) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8934) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8938) prev_offset = linfo[i].insn_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8939) ulinfo += rec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8942) if (s != env->subprog_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8943) verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8944) env->subprog_cnt - s, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8945) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8946) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8949) prog->aux->linfo = linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8950) prog->aux->nr_linfo = nr_linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8952) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8954) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8955) kvfree(linfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8956) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8959) static int check_btf_info(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8960) const union bpf_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8961) union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8963) struct btf *btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8964) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8966) if (!attr->func_info_cnt && !attr->line_info_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8967) if (check_abnormal_return(env))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8968) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8969) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8972) btf = btf_get_by_fd(attr->prog_btf_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8973) if (IS_ERR(btf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8974) return PTR_ERR(btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8975) env->prog->aux->btf = btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8977) err = check_btf_func(env, attr, uattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8978) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8979) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8981) err = check_btf_line(env, attr, uattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8982) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8983) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8985) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8987)
/* check that %cur's range satisfies %old's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8989) static bool range_within(struct bpf_reg_state *old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8990) struct bpf_reg_state *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8992) return old->umin_value <= cur->umin_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8993) old->umax_value >= cur->umax_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8994) old->smin_value <= cur->smin_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8995) old->smax_value >= cur->smax_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8996) old->u32_min_value <= cur->u32_min_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8997) old->u32_max_value >= cur->u32_max_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8998) old->s32_min_value <= cur->s32_min_value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8999) old->s32_max_value >= cur->s32_max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9000) }
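
/* Worked example: if the old (already verified) register was known to lie
 * in [0, 10] for every bound tracked above and the current one lies in
 * [2, 5], range_within() returns true - anything that was safe for the
 * wider old range is still safe for the narrower current one.  The reverse
 * (old [2, 5], cur [0, 10]) fails, since cur admits values the old walk
 * never had to prove safe.
 */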
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9002) /* If in the old state two registers had the same id, then they need to have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9003) * the same id in the new state as well. But that id could be different from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9004) * the old state, so we need to track the mapping from old to new ids.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9005) * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9006) * regs with old id 5 must also have new id 9 for the new state to be safe. But
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9007) * regs with a different old id could still have new id 9, we don't care about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9008) * that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9009) * So we look through our idmap to see if this old id has been seen before. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9010) * so, we require the new id to match; otherwise, we add the id pair to the map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9011) */
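/* Worked example: starting from an empty idmap, seeing (old 5, cur 9)
 * records the pair and returns true.  A later (old 5, cur 9) matches and
 * returns true, (old 5, cur 7) returns false, while (old 6, cur 9) just
 * records a new pair - reusing a cur id for a different old id is fine.
 */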
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9012) static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9014) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9016) for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9017) if (!idmap[i].old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9018) /* Reached an empty slot; haven't seen this id before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9019) idmap[i].old = old_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9020) idmap[i].cur = cur_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9021) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9023) if (idmap[i].old == old_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9024) return idmap[i].cur == cur_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9026) /* We ran out of idmap slots, which should be impossible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9027) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9028) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9031) static void clean_func_state(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9032) struct bpf_func_state *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9034) enum bpf_reg_liveness live;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9035) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9037) for (i = 0; i < BPF_REG_FP; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9038) live = st->regs[i].live;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9039) /* liveness must not touch this register anymore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9040) st->regs[i].live |= REG_LIVE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9041) if (!(live & REG_LIVE_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9042) /* since the register is unused, clear its state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9043) * to make further comparison simpler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9045) __mark_reg_not_init(env, &st->regs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9048) for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9049) live = st->stack[i].spilled_ptr.live;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9050) /* liveness must not touch this stack slot anymore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9051) st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9052) if (!(live & REG_LIVE_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9053) __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9054) for (j = 0; j < BPF_REG_SIZE; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9055) st->stack[i].slot_type[j] = STACK_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9060) static void clean_verifier_state(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9061) struct bpf_verifier_state *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9063) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9065) if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9066) /* all regs in this state in all frames were already marked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9067) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9069) for (i = 0; i <= st->curframe; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9070) clean_func_state(env, st->frame[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9073) /* the parentage chains form a tree.
 * the verifier states are added to state lists at a given insn and
 * pushed onto the state stack for future exploration.
 * when the verifier reaches the bpf_exit insn some of the verifier states
 * stored in the state lists have their final liveness state already,
 * but a lot of states will get revised from a liveness point of view when
 * the verifier explores other branches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9080) * Example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9081) * 1: r0 = 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9082) * 2: if r1 == 100 goto pc+1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9083) * 3: r0 = 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9084) * 4: exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9085) * when the verifier reaches exit insn the register r0 in the state list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9086) * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9087) * of insn 2 and goes exploring further. At the insn 4 it will walk the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9088) * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9089) *
 * Since the verifier pushes branch states as it sees them while exploring
 * the program, walking the same branch instruction for the second
 * time means that all states below this branch were already explored and
 * their final liveness marks are already propagated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9094) * Hence when the verifier completes the search of state list in is_state_visited()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9095) * we can call this clean_live_states() function to mark all liveness states
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9096) * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9097) * will not be used.
 * This function also clears the registers and stack slots that are
 * !REG_LIVE_READ to simplify state merging.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9100) *
 * An important note here is that walking the same branch instruction in the
 * callee doesn't mean that the states are DONE. The verifier also has to
 * compare the callsites.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9105) static void clean_live_states(struct bpf_verifier_env *env, int insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9106) struct bpf_verifier_state *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9108) struct bpf_verifier_state_list *sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9109) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9111) sl = *explored_state(env, insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9112) while (sl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9113) if (sl->state.branches)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9114) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9115) if (sl->state.insn_idx != insn ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9116) sl->state.curframe != cur->curframe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9117) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9118) for (i = 0; i <= cur->curframe; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9119) if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9120) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9121) clean_verifier_state(env, &sl->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9122) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9123) sl = sl->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9127) /* Returns true if (rold safe implies rcur safe) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9128) static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9129) struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9131) bool equal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9133) if (!(rold->live & REG_LIVE_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9134) /* explored state didn't use this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9135) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9137) equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9139) if (rold->type == PTR_TO_STACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9140) /* two stack pointers are equal only if they're pointing to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9141) * the same stack frame, since fp-8 in foo != fp-8 in bar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9143) return equal && rold->frameno == rcur->frameno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9145) if (equal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9146) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9148) if (rold->type == NOT_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9149) /* explored state can't have used this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9150) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9151) if (rcur->type == NOT_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9152) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9153) switch (rold->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9154) case SCALAR_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9155) if (env->explore_alu_limits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9156) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9157) if (rcur->type == SCALAR_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9158) if (!rold->precise && !rcur->precise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9159) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9160) /* new val must satisfy old val knowledge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9161) return range_within(rold, rcur) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9162) tnum_in(rold->var_off, rcur->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9163) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9164) /* We're trying to use a pointer in place of a scalar.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9165) * Even if the scalar was unbounded, this could lead to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9166) * pointer leaks because scalars are allowed to leak
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9167) * while pointers are not. We could make this safe in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9168) * special cases if root is calling us, but it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9169) * probably not worth the hassle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9171) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9173) case PTR_TO_MAP_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9174) /* If the new min/max/var_off satisfy the old ones and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9175) * everything else matches, we are OK.
 * 'id' is not compared, since it's only used for maps with
 * a bpf_spin_lock inside a map element, and in such cases, if
 * the rest of the prog is valid for one map element, then
 * it's valid for all map elements regardless of the key
 * used in bpf_map_lookup().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9182) return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9183) range_within(rold, rcur) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9184) tnum_in(rold->var_off, rcur->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9185) case PTR_TO_MAP_VALUE_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9186) /* a PTR_TO_MAP_VALUE could be safe to use as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9187) * PTR_TO_MAP_VALUE_OR_NULL into the same map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9188) * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9189) * checked, doing so could have affected others with the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9190) * id, and we can't check for that because we lost the id when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9191) * we converted to a PTR_TO_MAP_VALUE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9193) if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9194) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9195) if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9196) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9197) /* Check our ids match any regs they're supposed to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9198) return check_ids(rold->id, rcur->id, idmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9199) case PTR_TO_PACKET_META:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9200) case PTR_TO_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9201) if (rcur->type != rold->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9202) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9203) /* We must have at least as much range as the old ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9204) * did, so that any accesses which were safe before are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9205) * still safe. This is true even if old range < old off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9206) * since someone could have accessed through (ptr - k), or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9207) * even done ptr -= k in a register, to get a safe access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9209) if (rold->range > rcur->range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9210) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9211) /* If the offsets don't match, we can't trust our alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9212) * nor can we be sure that we won't fall out of range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9214) if (rold->off != rcur->off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9215) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9216) /* id relations must be preserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9217) if (rold->id && !check_ids(rold->id, rcur->id, idmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9218) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9219) /* new val must satisfy old val knowledge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9220) return range_within(rold, rcur) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9221) tnum_in(rold->var_off, rcur->var_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9222) case PTR_TO_CTX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9223) case CONST_PTR_TO_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9224) case PTR_TO_PACKET_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9225) case PTR_TO_FLOW_KEYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9226) case PTR_TO_SOCKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9227) case PTR_TO_SOCKET_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9228) case PTR_TO_SOCK_COMMON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9229) case PTR_TO_SOCK_COMMON_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9230) case PTR_TO_TCP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9231) case PTR_TO_TCP_SOCK_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9232) case PTR_TO_XDP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9233) /* Only valid matches are exact, and those would already have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9234) * been accepted by the memcmp() above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9236) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9237) /* Don't know what's going on, just say it's not safe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9238) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9241) /* Shouldn't get here; if we do, say it's not safe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9242) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9243) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9244) }
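
/* Worked example for the SCALAR_VALUE rule above (illustrative only, the
 * numbers are made up): suppose the already-verified rold was known to be
 * in [0, 255] with var_off = (value 0x0, mask 0xff), i.e. only the low
 * 8 bits unknown. A current rcur bounded by [16, 32] with
 * var_off = (value 0x0, mask 0x3f) is accepted: range_within() holds since
 * [16, 32] lies inside [0, 255], and tnum_in() holds since rcur's unknown
 * bits (0x3f) are a subset of rold's (0xff) and the known bits agree.
 * An rcur bounded by [0, 1024] would fail range_within() and the path
 * would keep being explored instead of being pruned.
 */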
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9246) static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9247) struct bpf_func_state *cur, struct bpf_id_pair *idmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9249) int i, spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9251) /* walk slots of the explored stack and ignore any additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9252) * slots in the current stack, since the explored (safe) state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9253) * didn't use them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9254) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9255) for (i = 0; i < old->allocated_stack; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9256) spi = i / BPF_REG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9258) if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9259) i += BPF_REG_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9260) /* explored state didn't use this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9261) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9264) if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9265) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9267) /* explored stack has more populated slots than current stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9268) * and these slots were used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9270) if (i >= cur->allocated_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9271) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9273) /* if old state was safe with misc data in the stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9274) * it will be safe with zero-initialized stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9275) * The opposite is not true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9277) if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9278) cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9279) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9280) if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9281) cur->stack[spi].slot_type[i % BPF_REG_SIZE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9282) /* Ex: old explored (safe) state has STACK_SPILL in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9283) * this stack slot, but current has STACK_MISC ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9284) * these verifier states are not equivalent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9285) * return false to continue verification of this path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9287) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9288) if (i % BPF_REG_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9289) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9290) if (old->stack[spi].slot_type[0] != STACK_SPILL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9291) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9292) if (!regsafe(env, &old->stack[spi].spilled_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9293) &cur->stack[spi].spilled_ptr, idmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9294) /* when explored and current stack slot are both storing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9295) * spilled registers, check that the stored pointer types
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9296) * are the same as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9297) * Ex: explored safe path could have stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9298) * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9299) * but current path has stored:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9300) * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9301) * such verifier states are not equivalent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9302) * return false to continue verification of this path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9303) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9304) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9306) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9307) }
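
/* Summary of the per-byte slot comparison above (illustrative):
 * - old STACK_INVALID vs cur anything: compatible, the old path never read it
 * - old STACK_MISC vs cur STACK_ZERO: compatible, zeroes are a special case
 *   of unknown bytes
 * - old STACK_ZERO vs cur STACK_MISC (or any other differing pair): mismatch,
 *   return false
 * - old STACK_SPILL vs cur STACK_SPILL: compatible only if regsafe() accepts
 *   the two spilled registers
 */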
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9309) static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9311) if (old->acquired_refs != cur->acquired_refs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9312) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9313) return !memcmp(old->refs, cur->refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9314) sizeof(*old->refs) * old->acquired_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9315) }
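
/* Illustrative note: each entry of old->refs[] records the id and the
 * acquiring insn_idx of one still-outstanding reference-tracked object
 * (e.g. a socket returned by bpf_sk_lookup_tcp() that has not been released
 * yet), so two states only match if they hold the same number of references
 * with identical ids acquired at the same instructions.
 */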
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9317) /* compare two verifier states
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9318) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9319) * all states stored in state_list are known to be valid, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9320) * verifier reached 'bpf_exit' instruction through them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9321) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9322) * this function is called when the verifier explores different branches of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9323) * execution popped from the state stack. If it sees an old state that has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9324) * a stricter register state and a stricter stack state, then this execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9325) * branch doesn't need to be explored further, since the verifier already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9326) * concluded that the stricter state leads to a valid finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9327) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9328) * Therefore two states are equivalent if the explored register state and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9329) * the explored stack state are both more conservative than the current ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9330) * Example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9331) * explored current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9332) * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9333) * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9334) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9335) * In other words, if the current stack state (the one being explored) has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9336) * more valid slots than the old one that already passed validation, it means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9337) * the verifier can stop exploring and conclude that the current state is valid too
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9338) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9339) * Similarly with registers. If the explored state has a register marked as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9340) * invalid whereas the same register in the current state is meaningful, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9341) * means that the current state will reach the 'bpf_exit' instruction safely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9342) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9343) static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9344) struct bpf_func_state *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9346) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9348) memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9349) for (i = 0; i < MAX_BPF_REG; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9350) if (!regsafe(env, &old->regs[i], &cur->regs[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9351) env->idmap_scratch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9352) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9354) if (!stacksafe(env, old, cur, env->idmap_scratch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9355) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9357) if (!refsafe(old, cur))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9358) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9360) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9363) static bool states_equal(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9364) struct bpf_verifier_state *old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9365) struct bpf_verifier_state *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9367) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9369) if (old->curframe != cur->curframe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9370) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9372) /* Verification state from speculative execution simulation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9373) * must never prune a non-speculative execution one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9375) if (old->speculative && !cur->speculative)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9376) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9378) if (old->active_spin_lock != cur->active_spin_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9379) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9381) /* for states to be equal callsites have to be the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9382) * and all frame states need to be equivalent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9384) for (i = 0; i <= old->curframe; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9385) if (old->frame[i]->callsite != cur->frame[i]->callsite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9386) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9387) if (!func_states_equal(env, old->frame[i], cur->frame[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9388) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9390) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9393) /* Return 0 if no propagation happened. Return negative error code if error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9394) * happened. Otherwise, return the propagated bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9396) static int propagate_liveness_reg(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9397) struct bpf_reg_state *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9398) struct bpf_reg_state *parent_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9400) u8 parent_flag = parent_reg->live & REG_LIVE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9401) u8 flag = reg->live & REG_LIVE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9402) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9404) /* By the time we get here, the read flags of PARENT_REG and REG can each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9405) * be REG_LIVE_READ64, REG_LIVE_READ32 or REG_LIVE_NONE. There is no need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9406) * for propagation if PARENT_REG already has the strongest, REG_LIVE_READ64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9407) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9408) if (parent_flag == REG_LIVE_READ64 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9409) /* Or if there is no read flag from REG. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9410) !flag ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9411) /* Or if the read flag from REG is the same as PARENT_REG. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9412) parent_flag == flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9413) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9415) err = mark_reg_read(env, reg, parent_reg, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9416) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9417) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9419) return flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9420) }
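
/* Example of how the check above resolves the read flags (illustrative):
 * - parent READ64, reg READ32: return 0, parent already has the strongest mark
 * - parent READ32, reg NONE: return 0, nothing to propagate
 * - parent READ32, reg READ32: return 0, already equal
 * - parent NONE, reg READ32: mark_reg_read() and return REG_LIVE_READ32
 * - parent READ32, reg READ64: mark_reg_read() and return REG_LIVE_READ64,
 *   after which the caller calls mark_insn_zext() on the parent register
 */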
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9422) /* A write screens off any subsequent reads; but write marks come from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9423) * straight-line code between a state and its parent. When we arrive at an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9424) * equivalent state (jump target or such) we didn't arrive by the straight-line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9425) * code, so read marks in the state must propagate to the parent regardless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9426) * of the state's write marks. That's what 'parent == state->parent' comparison
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9427) * in mark_reg_read() is for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9428) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9429) static int propagate_liveness(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9430) const struct bpf_verifier_state *vstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9431) struct bpf_verifier_state *vparent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9433) struct bpf_reg_state *state_reg, *parent_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9434) struct bpf_func_state *state, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9435) int i, frame, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9437) if (vparent->curframe != vstate->curframe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9438) WARN(1, "propagate_live: parent frame %d current frame %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9439) vparent->curframe, vstate->curframe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9440) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9442) /* Propagate read liveness of registers... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9443) BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9444) for (frame = 0; frame <= vstate->curframe; frame++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9445) parent = vparent->frame[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9446) state = vstate->frame[frame];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9447) parent_reg = parent->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9448) state_reg = state->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9449) /* We don't need to worry about FP liveness, it's read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9450) for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9451) err = propagate_liveness_reg(env, &state_reg[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9452) &parent_reg[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9453) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9454) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9455) if (err == REG_LIVE_READ64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9456) mark_insn_zext(env, &parent_reg[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9459) /* Propagate stack slots. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9460) for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9461) i < parent->allocated_stack / BPF_REG_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9462) parent_reg = &parent->stack[i].spilled_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9463) state_reg = &state->stack[i].spilled_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9464) err = propagate_liveness_reg(env, state_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9465) parent_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9466) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9467) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9470) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9471) }
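
/* Example (illustrative): for a two-frame state where frame 1 is the current
 * frame, the loops above propagate read marks for r6 - r9 of frame 0 (only
 * the callee-saved registers matter across a call; r10 is read-only) and for
 * r0 - r9 of frame 1, plus every stack slot index that is allocated in both
 * the state and its parent.
 */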
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9473) /* find precise scalars in the previous equivalent state and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9474) * propagate them into the current state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9476) static int propagate_precision(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9477) const struct bpf_verifier_state *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9479) struct bpf_reg_state *state_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9480) struct bpf_func_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9481) int i, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9483) state = old->frame[old->curframe];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9484) state_reg = state->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9485) for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9486) if (state_reg->type != SCALAR_VALUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9487) !state_reg->precise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9488) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9489) if (env->log.level & BPF_LOG_LEVEL2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9490) verbose(env, "propagating r%d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9491) err = mark_chain_precision(env, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9492) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9493) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9496) for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9497) if (state->stack[i].slot_type[0] != STACK_SPILL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9498) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9499) state_reg = &state->stack[i].spilled_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9500) if (state_reg->type != SCALAR_VALUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9501) !state_reg->precise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9502) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9503) if (env->log.level & BPF_LOG_LEVEL2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9504) verbose(env, "propagating fp%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9505) (-i - 1) * BPF_REG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9506) err = mark_chain_precision_stack(env, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9507) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9508) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9510) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9511) }
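
/* Example: for a precise scalar spilled in stack slot i the log line above
 * reports the slot by its frame-pointer offset, e.g. slot 0 prints
 * "propagating fp-8" and slot 1 prints "propagating fp-16", since
 * (-i - 1) * BPF_REG_SIZE converts the slot index into the offset of the
 * slot's lowest byte.
 */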
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9513) static bool states_maybe_looping(struct bpf_verifier_state *old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9514) struct bpf_verifier_state *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9516) struct bpf_func_state *fold, *fcur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9517) int i, fr = cur->curframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9519) if (old->curframe != fr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9520) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9522) fold = old->frame[fr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9523) fcur = cur->frame[fr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9524) for (i = 0; i < MAX_BPF_REG; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9525) if (memcmp(&fold->regs[i], &fcur->regs[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9526) offsetof(struct bpf_reg_state, parent)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9527) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9528) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9529) }
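
/* Example (illustrative): in a loop like
 *   r1 += 1
 *   if r1 < 1000000 goto pc-2
 * each visit to the branch typically sees different bounds on r1, so the
 * memcmp() above (which covers type, off, id and the value-tracking fields
 * up to 'parent') reports a difference and this returns false. It returns
 * true only when every register is byte-for-byte identical to the earlier
 * visit, which is what lets the infinite-loop check in is_state_visited()
 * treat a subsequent states_equal() match as a genuine lack of progress.
 */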
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9532) static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9534) struct bpf_verifier_state_list *new_sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9535) struct bpf_verifier_state_list *sl, **pprev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9536) struct bpf_verifier_state *cur = env->cur_state, *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9537) int i, j, err, states_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9538) bool add_new_state = env->test_state_freq ? true : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9540) cur->last_insn_idx = env->prev_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9541) if (!env->insn_aux_data[insn_idx].prune_point)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9542) /* this 'insn_idx' instruction wasn't marked, so we will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9543) * be doing state search here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9545) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9547) /* bpf progs typically have a pruning point every 4 instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9548) * http://vger.kernel.org/bpfconf2019.html#session-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9549) * Do not add new state for future pruning if the verifier hasn't seen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9550) * at least 2 jumps and at least 8 instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9551) * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9552) * In tests that amounts to up to a 50% reduction in total verifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9553) * memory consumption and a 20% speedup in verification time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9555) if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9556) env->insn_processed - env->prev_insn_processed >= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9557) add_new_state = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9559) pprev = explored_state(env, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9560) sl = *pprev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9562) clean_live_states(env, insn_idx, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9564) while (sl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9565) states_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9566) if (sl->state.insn_idx != insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9567) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9568) if (sl->state.branches) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9569) if (states_maybe_looping(&sl->state, cur) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9570) states_equal(env, &sl->state, cur)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9571) verbose_linfo(env, insn_idx, "; ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9572) verbose(env, "infinite loop detected at insn %d\n", insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9573) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9575) /* if the verifier is processing a loop, avoid adding new state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9576) * too often, since different loop iterations have distinct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9577) * states and may not help future pruning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9578) * This threshold shouldn't be too low to make sure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9579) * a loop with large bound will be rejected quickly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9580) * The most abusive loop will be:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9581) * r1 += 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9582) * if r1 < 1000000 goto pc-2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9583) * 1M insn_processed limit / 100 == 10k peak states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9584) * This threshold shouldn't be too high either, since states
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9585) * at the end of the loop are likely to be useful in pruning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9586) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9587) if (env->jmps_processed - env->prev_jmps_processed < 20 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9588) env->insn_processed - env->prev_insn_processed < 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9589) add_new_state = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9590) goto miss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9592) if (states_equal(env, &sl->state, cur)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9593) sl->hit_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9594) /* reached equivalent register/stack state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9595) * prune the search.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9596) * Registers read by the continuation are read by us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9597) * If we have any write marks in env->cur_state, they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9598) * will prevent corresponding reads in the continuation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9599) * from reaching our parent (an explored_state). Our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9600) * own state will get the read marks recorded, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9601) * they'll be immediately forgotten as we're pruning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9602) * this state and will pop a new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9604) err = propagate_liveness(env, &sl->state, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9606) /* if previous state reached the exit with precision and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9607) * current state is equivalent to it (except precision marks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9608) * the precision needs to be propagated back in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9609) * the current state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9610) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9611) err = err ? : push_jmp_history(env, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9612) err = err ? : propagate_precision(env, &sl->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9613) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9614) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9615) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9617) miss:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9618) /* when a new state is not going to be added, do not increase the miss count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9619) * Otherwise several loop iterations will remove the state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9620) * recorded earlier. The goal of these heuristics is to have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9621) * states from some iterations of the loop (some in the beginning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9622) * and some at the end) to help pruning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9624) if (add_new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9625) sl->miss_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9626) /* heuristic to determine whether this state is beneficial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9627) * to keep checking from state equivalence point of view.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9628) * Higher numbers increase max_states_per_insn and verification time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9629) * but do not meaningfully decrease insn_processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9630) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9631) if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9632) /* the state is unlikely to be useful. Remove it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9633) * speed up verification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9635) *pprev = sl->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9636) if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9637) u32 br = sl->state.branches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9639) WARN_ONCE(br,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9640) "BUG live_done but branches_to_explore %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9641) br);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9642) free_verifier_state(&sl->state, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9643) kfree(sl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9644) env->peak_states--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9645) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9646) /* cannot free this state, since parentage chain may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9647) * walk it later. Add it to the free_list instead, to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9648) * be freed at the end of verification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9650) sl->next = env->free_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9651) env->free_list = sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9653) sl = *pprev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9654) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9655) }
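/* Example: with the miss_cnt > hit_cnt * 3 + 3 rule above, a state that
 * never helped pruning (hit_cnt == 0) is evicted on its 4th miss, while a
 * state that pruned one path (hit_cnt == 1) is only dropped on its 7th.
 */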
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9656) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9657) pprev = &sl->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9658) sl = *pprev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9661) if (env->max_states_per_insn < states_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9662) env->max_states_per_insn = states_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9664) if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9665) return push_jmp_history(env, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9667) if (!add_new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9668) return push_jmp_history(env, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9670) /* There were no equivalent states, remember the current one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9671) * Technically the current state is not proven to be safe yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9672) * but it will either reach the outermost bpf_exit (which means it's safe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9673) * or it will be rejected. When there are no loops the verifier won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9674) * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9675) * again on the way to bpf_exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9676) * When looping the sl->state.branches will be > 0 and this state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9677) * will not be considered for equivalence until branches == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9678) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9679) new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9680) if (!new_sl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9681) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9682) env->total_states++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9683) env->peak_states++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9684) env->prev_jmps_processed = env->jmps_processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9685) env->prev_insn_processed = env->insn_processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9687) /* add new state to the head of linked list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9688) new = &new_sl->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9689) err = copy_verifier_state(new, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9690) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9691) free_verifier_state(new, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9692) kfree(new_sl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9693) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9695) new->insn_idx = insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9696) WARN_ONCE(new->branches != 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9697) "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9699) cur->parent = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9700) cur->first_insn_idx = insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9701) clear_jmp_history(cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9702) new_sl->next = *explored_state(env, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9703) *explored_state(env, insn_idx) = new_sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9704) /* connect new state to parentage chain. Current frame needs all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9705) * registers connected. Only r6 - r9 of the callers are alive (pushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9706) * to the stack implicitly by JITs) so in callers' frames connect just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9707) * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9708) * the state of the call instruction (with WRITTEN set), and r0 comes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9709) * from callee with its full parentage chain, anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9710) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9711) /* clear write marks in current state: the writes we did are not writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9712) * our child did, so they don't screen off its reads from us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9713) * (There are no read marks in current state, because reads always mark
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9714) * their parent and current state never has children yet. Only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9715) * explored_states can get read marks.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9717) for (j = 0; j <= cur->curframe; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9718) for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9719) cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9720) for (i = 0; i < BPF_REG_FP; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9721) cur->frame[j]->regs[i].live = REG_LIVE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9724) /* all stack frames are accessible from callee, clear them all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9725) for (j = 0; j <= cur->curframe; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9726) struct bpf_func_state *frame = cur->frame[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9727) struct bpf_func_state *newframe = new->frame[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9729) for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9730) frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9731) frame->stack[i].spilled_ptr.parent =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9732) &newframe->stack[i].spilled_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9735) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9738) /* Return true if it's OK to have the same insn return a different type. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9739) static bool reg_type_mismatch_ok(enum bpf_reg_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9741) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9742) case PTR_TO_CTX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9743) case PTR_TO_SOCKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9744) case PTR_TO_SOCKET_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9745) case PTR_TO_SOCK_COMMON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9746) case PTR_TO_SOCK_COMMON_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9747) case PTR_TO_TCP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9748) case PTR_TO_TCP_SOCK_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9749) case PTR_TO_XDP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9750) case PTR_TO_BTF_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9751) case PTR_TO_BTF_ID_OR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9752) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9753) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9754) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9758) /* If an instruction was previously used with particular pointer types, then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9759) * need to be careful to avoid cases such as the one below, where it may be ok
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9760) * for one branch to access the pointer, but not ok for the other branch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9761) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9762) * R1 = sock_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9763) * goto X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9764) * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9765) * R1 = some_other_valid_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9766) * goto X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9767) * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9768) * R2 = *(u32 *)(R1 + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9769) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9770) static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9772) return src != prev && (!reg_type_mismatch_ok(src) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9773) !reg_type_mismatch_ok(prev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9774) }
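
/* Example: with the two helpers above,
 * reg_type_mismatch(PTR_TO_CTX, PTR_TO_STACK) is true and the access is
 * rejected, since ctx accesses are rewritten per program type and the same
 * insn cannot serve both; reg_type_mismatch(PTR_TO_STACK, PTR_TO_MAP_VALUE)
 * is false, because both types are "mismatch ok" and the same insn may see
 * either pointer.
 */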
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9776) static int do_check(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9778) bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9779) struct bpf_verifier_state *state = env->cur_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9780) struct bpf_insn *insns = env->prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9781) struct bpf_reg_state *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9782) int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9783) bool do_print_state = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9784) int prev_insn_idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9786) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9787) struct bpf_insn *insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9788) u8 class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9789) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9791) env->prev_insn_idx = prev_insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9792) if (env->insn_idx >= insn_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9793) verbose(env, "invalid insn idx %d insn_cnt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9794) env->insn_idx, insn_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9795) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9798) insn = &insns[env->insn_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9799) class = BPF_CLASS(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9801) if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9802) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9803) "BPF program is too large. Processed %d insn\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9804) env->insn_processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9805) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9808) err = is_state_visited(env, env->insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9809) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9810) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9811) if (err == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9812) /* found equivalent state, can prune the search */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9813) if (env->log.level & BPF_LOG_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9814) if (do_print_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9815) verbose(env, "\nfrom %d to %d%s: safe\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9816) env->prev_insn_idx, env->insn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9817) env->cur_state->speculative ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9818) " (speculative execution)" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9819) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9820) verbose(env, "%d: safe\n", env->insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9822) goto process_bpf_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9825) if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9826) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9828) if (need_resched())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9829) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9831) if (env->log.level & BPF_LOG_LEVEL2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9832) (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9833) if (env->log.level & BPF_LOG_LEVEL2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9834) verbose(env, "%d:", env->insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9835) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9836) verbose(env, "\nfrom %d to %d%s:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9837) env->prev_insn_idx, env->insn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9838) env->cur_state->speculative ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9839) " (speculative execution)" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9840) print_verifier_state(env, state->frame[state->curframe]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9841) do_print_state = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9844) if (env->log.level & BPF_LOG_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9845) const struct bpf_insn_cbs cbs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9846) .cb_print = verbose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9847) .private_data = env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9848) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9850) verbose_linfo(env, env->insn_idx, "; ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9851) verbose(env, "%d: ", env->insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9852) print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9855) if (bpf_prog_is_dev_bound(env->prog->aux)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9856) err = bpf_prog_offload_verify_insn(env, env->insn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9857) env->prev_insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9858) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9859) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9862) regs = cur_regs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9863) sanitize_mark_insn_seen(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9864) prev_insn_idx = env->insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9866) if (class == BPF_ALU || class == BPF_ALU64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9867) err = check_alu_op(env, insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9868) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9869) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9871) } else if (class == BPF_LDX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9872) enum bpf_reg_type *prev_src_type, src_reg_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9874) /* check for reserved fields is already done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9876) /* check src operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9877) err = check_reg_arg(env, insn->src_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9878) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9879) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9881) err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9882) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9883) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9885) src_reg_type = regs[insn->src_reg].type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9887) /* check that memory (src_reg + off) is readable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9888) * the state of dst_reg will be updated by this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9890) err = check_mem_access(env, env->insn_idx, insn->src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9891) insn->off, BPF_SIZE(insn->code),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9892) BPF_READ, insn->dst_reg, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9893) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9894) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9896) prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9898) if (*prev_src_type == NOT_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9899) /* saw a valid insn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9900) * dst_reg = *(u32 *)(src_reg + off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9901) * save type to validate intersecting paths
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9903) *prev_src_type = src_reg_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9905) } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9906) /* Abuser program is trying to use the same insn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9907) * dst_reg = *(u32*) (src_reg + off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9908) * with different pointer types:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9909) * src_reg == ctx in one branch and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9910) * src_reg == stack|map in some other branch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9911) * Reject it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9913) verbose(env, "same insn cannot be used with different pointers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9914) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9917) } else if (class == BPF_STX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9918) enum bpf_reg_type *prev_dst_type, dst_reg_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9920) if (BPF_MODE(insn->code) == BPF_XADD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9921) err = check_xadd(env, env->insn_idx, insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9922) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9923) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9924) env->insn_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9925) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9928) /* check src1 operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9929) err = check_reg_arg(env, insn->src_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9930) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9931) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9932) /* check src2 operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9933) err = check_reg_arg(env, insn->dst_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9934) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9935) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9937) dst_reg_type = regs[insn->dst_reg].type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9939) /* check that memory (dst_reg + off) is writeable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9940) err = check_mem_access(env, env->insn_idx, insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9941) insn->off, BPF_SIZE(insn->code),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9942) BPF_WRITE, insn->src_reg, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9943) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9944) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9946) prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9948) if (*prev_dst_type == NOT_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9949) *prev_dst_type = dst_reg_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9950) } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9951) verbose(env, "same insn cannot be used with different pointers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9952) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9955) } else if (class == BPF_ST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9956) if (BPF_MODE(insn->code) != BPF_MEM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9957) insn->src_reg != BPF_REG_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9958) verbose(env, "BPF_ST uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9959) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9961) /* check src operand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9962) err = check_reg_arg(env, insn->dst_reg, SRC_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9963) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9964) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9966) if (is_ctx_reg(env, insn->dst_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9967) verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9968) insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9969) reg_type_str[reg_state(env, insn->dst_reg)->type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9970) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9973) /* check that memory (dst_reg + off) is writeable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9974) err = check_mem_access(env, env->insn_idx, insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9975) insn->off, BPF_SIZE(insn->code),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9976) BPF_WRITE, -1, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9977) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9978) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9980) } else if (class == BPF_JMP || class == BPF_JMP32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9981) u8 opcode = BPF_OP(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9983) env->jmps_processed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9984) if (opcode == BPF_CALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9985) if (BPF_SRC(insn->code) != BPF_K ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9986) insn->off != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9987) (insn->src_reg != BPF_REG_0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9988) insn->src_reg != BPF_PSEUDO_CALL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9989) insn->dst_reg != BPF_REG_0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9990) class == BPF_JMP32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9991) verbose(env, "BPF_CALL uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9992) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9995) if (env->cur_state->active_spin_lock &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9996) (insn->src_reg == BPF_PSEUDO_CALL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9997) insn->imm != BPF_FUNC_spin_unlock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9998) verbose(env, "function calls are not allowed while holding a lock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9999) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10001) if (insn->src_reg == BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10002) err = check_func_call(env, insn, &env->insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10003) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10004) err = check_helper_call(env, insn->imm, env->insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10005) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10006) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10008) } else if (opcode == BPF_JA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10009) if (BPF_SRC(insn->code) != BPF_K ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10010) insn->imm != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10011) insn->src_reg != BPF_REG_0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10012) insn->dst_reg != BPF_REG_0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10013) class == BPF_JMP32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10014) verbose(env, "BPF_JA uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10015) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10018) env->insn_idx += insn->off + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10019) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10021) } else if (opcode == BPF_EXIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10022) if (BPF_SRC(insn->code) != BPF_K ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10023) insn->imm != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10024) insn->src_reg != BPF_REG_0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10025) insn->dst_reg != BPF_REG_0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10026) class == BPF_JMP32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10027) verbose(env, "BPF_EXIT uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10028) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10031) if (env->cur_state->active_spin_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10032) verbose(env, "bpf_spin_unlock is missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10033) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10036) if (state->curframe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10037) /* exit from nested function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10038) err = prepare_func_exit(env, &env->insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10039) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10040) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10041) do_print_state = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10042) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10045) err = check_reference_leak(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10046) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10047) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10049) err = check_return_code(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10050) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10051) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10052) process_bpf_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10053) update_branch_counts(env, env->cur_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10054) err = pop_stack(env, &prev_insn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10055) &env->insn_idx, pop_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10056) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10057) if (err != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10058) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10059) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10060) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10061) do_print_state = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10062) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10064) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10065) err = check_cond_jmp_op(env, insn, &env->insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10066) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10067) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10069) } else if (class == BPF_LD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10070) u8 mode = BPF_MODE(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10072) if (mode == BPF_ABS || mode == BPF_IND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10073) err = check_ld_abs(env, insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10074) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10075) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10077) } else if (mode == BPF_IMM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10078) err = check_ld_imm(env, insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10079) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10080) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10082) env->insn_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10083) sanitize_mark_insn_seen(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10084) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10085) verbose(env, "invalid BPF_LD mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10086) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10088) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10089) verbose(env, "unknown insn class %d\n", class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10090) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10093) env->insn_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10096) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10099) /* replace pseudo btf_id with kernel symbol address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10100) static int check_pseudo_btf_id(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10101) struct bpf_insn *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10102) struct bpf_insn_aux_data *aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10104) const struct btf_var_secinfo *vsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10105) const struct btf_type *datasec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10106) const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10107) const char *sym_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10108) bool percpu = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10109) u32 type, id = insn->imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10110) s32 datasec_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10111) u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10112) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10114) if (!btf_vmlinux) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10115) verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10116) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10119) if (insn[1].imm != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10120) verbose(env, "reserved field (insn[1].imm) is used in pseudo_btf_id ldimm64 insn.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10121) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10124) t = btf_type_by_id(btf_vmlinux, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10125) if (!t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10126) verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10127) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10130) if (!btf_type_is_var(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10131) verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10132) id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10133) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10136) sym_name = btf_name_by_offset(btf_vmlinux, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10137) addr = kallsyms_lookup_name(sym_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10138) if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10139) verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10140) sym_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10141) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10144) datasec_id = btf_find_by_name_kind(btf_vmlinux, ".data..percpu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10145) BTF_KIND_DATASEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10146) if (datasec_id > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10147) datasec = btf_type_by_id(btf_vmlinux, datasec_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10148) for_each_vsi(i, datasec, vsi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10149) if (vsi->type == id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10150) percpu = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10151) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10156) insn[0].imm = (u32)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10157) insn[1].imm = addr >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10159) type = t->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10160) t = btf_type_skip_modifiers(btf_vmlinux, type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10161) if (percpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10162) aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10163) aux->btf_var.btf_id = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10164) } else if (!btf_type_is_struct(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10165) const struct btf_type *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10166) const char *tname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10167) u32 tsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10169) /* resolve the type size of ksym. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10170) ret = btf_resolve_size(btf_vmlinux, t, &tsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10171) if (IS_ERR(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10172) tname = btf_name_by_offset(btf_vmlinux, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10173) verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10174) tname, PTR_ERR(ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10175) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10177) aux->btf_var.reg_type = PTR_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10178) aux->btf_var.mem_size = tsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10179) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10180) aux->btf_var.reg_type = PTR_TO_BTF_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10181) aux->btf_var.btf_id = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10183) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10184) }
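/* A minimal sketch of the rewrite above, assuming a per-cpu vmlinux
 * variable such as bpf_prog_active with BTF id ID (the id is purely
 * illustrative):
 *
 *   before: ldimm64 rX, src_reg = BPF_PSEUDO_BTF_ID,
 *           insn[0].imm = ID, insn[1].imm = 0
 *   after:  ldimm64 rX, insn[0].imm = lower 32 bits of the symbol
 *           address, insn[1].imm = upper 32 bits
 *
 * aux->btf_var additionally records whether rX must be treated as
 * PTR_TO_PERCPU_BTF_ID, PTR_TO_BTF_ID or PTR_TO_MEM (with mem_size).
 */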
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10185)
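/* Returns true when the map needs no run-time element allocation: either
 * it is not one of the hash map flavours checked below, or it is a hash
 * map created without BPF_F_NO_PREALLOC (i.e. preallocated).
 */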
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10186) static int check_map_prealloc(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10188) return (map->map_type != BPF_MAP_TYPE_HASH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10189) map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10190) map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10191) !(map->map_flags & BPF_F_NO_PREALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10194) static bool is_tracing_prog_type(enum bpf_prog_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10196) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10197) case BPF_PROG_TYPE_KPROBE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10198) case BPF_PROG_TYPE_TRACEPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10199) case BPF_PROG_TYPE_PERF_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10200) case BPF_PROG_TYPE_RAW_TRACEPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10201) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10202) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10203) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10207) static bool is_preallocated_map(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10209) if (!check_map_prealloc(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10210) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10211) if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10212) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10213) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10216) static int check_map_prog_compatibility(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10217) struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10218) struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10221) enum bpf_prog_type prog_type = resolve_prog_type(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10223) * Validate that trace type programs use preallocated hash maps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10224) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10225) * For programs attached to PERF events this is mandatory as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10226) * perf NMI can hit any arbitrary code sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10228) * All other trace types using run-time allocated hash maps are unsafe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10229) * as well because tracepoints or kprobes can be inside locked regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10230) * of the memory allocator or at a place where a recursion into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10231) * memory allocator would see inconsistent state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10232) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10233) * On RT enabled kernels run-time allocation of all trace type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10234) * programs is strictly prohibited due to lock type constraints. On
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10235) * !RT kernels it is allowed for backwards compatibility reasons for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10236) * now, but warnings are emitted so developers are made aware of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10237) * the unsafety and can fix their programs before this is enforced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10239) if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10240) if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10241) verbose(env, "perf_event programs can only use preallocated hash map\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10242) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10244) if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10245) verbose(env, "trace type programs can only use preallocated hash map\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10246) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10248) WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10249) verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10252) if ((is_tracing_prog_type(prog_type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10253) prog_type == BPF_PROG_TYPE_SOCKET_FILTER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10254) map_value_has_spin_lock(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10255) verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10256) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10259) if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10260) !bpf_offload_prog_map_match(prog, map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10261) verbose(env, "offload device mismatch between prog and map\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10262) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10265) if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10266) verbose(env, "bpf_struct_ops map cannot be used in prog\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10267) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10270) if (prog->aux->sleepable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10271) switch (map->map_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10272) case BPF_MAP_TYPE_HASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10273) case BPF_MAP_TYPE_LRU_HASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10274) case BPF_MAP_TYPE_ARRAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10275) if (!is_preallocated_map(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10276) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10277) "Sleepable programs can only use preallocated hash maps\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10278) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10280) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10281) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10282) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10283) "Sleepable programs can only use array and hash maps\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10284) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10287) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10288) }
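/* An illustrative combination that trips the sleepable check above: a
 * sleepable program (e.g. loaded with BPF_F_SLEEPABLE) paired with a
 * BPF_MAP_TYPE_HASH map created with BPF_F_NO_PREALLOC fails with
 * "Sleepable programs can only use preallocated hash maps", and any map
 * type outside HASH/LRU_HASH/ARRAY is rejected outright.
 */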
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10290) static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10292) return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10293) map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10296) /* find and rewrite pseudo imm in ld_imm64 instructions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10297) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10298) * 1. if it accesses map FD, replace it with actual map pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10299) * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10300) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10301) * NOTE: btf_vmlinux is required for converting pseudo btf_id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10303) static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10305) struct bpf_insn *insn = env->prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10306) int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10307) int i, j, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10309) err = bpf_prog_calc_tag(env->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10310) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10311) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10313) for (i = 0; i < insn_cnt; i++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10314) if (BPF_CLASS(insn->code) == BPF_LDX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10315) (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10316) verbose(env, "BPF_LDX uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10317) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10320) if (BPF_CLASS(insn->code) == BPF_STX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10321) ((BPF_MODE(insn->code) != BPF_MEM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10322) BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10323) verbose(env, "BPF_STX uses reserved fields\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10324) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10327) if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10328) struct bpf_insn_aux_data *aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10329) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10330) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10331) u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10333) if (i == insn_cnt - 1 || insn[1].code != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10334) insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10335) insn[1].off != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10336) verbose(env, "invalid bpf_ld_imm64 insn\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10337) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10340) if (insn[0].src_reg == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10341) /* valid generic load 64-bit imm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10342) goto next_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10344) if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10345) aux = &env->insn_aux_data[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10346) err = check_pseudo_btf_id(env, insn, aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10347) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10348) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10349) goto next_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10352) /* In the final convert_pseudo_ld_imm64() step, this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10353) * converted into a regular 64-bit imm load insn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10355) if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10356) insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10357) (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10358) insn[1].imm != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10359) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10360) "unrecognized bpf_ld_imm64 insn\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10361) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10364) f = fdget(insn[0].imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10365) map = __bpf_map_get(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10366) if (IS_ERR(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10367) verbose(env, "fd %d is not pointing to valid bpf_map\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10368) insn[0].imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10369) return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10372) err = check_map_prog_compatibility(env, map, env->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10373) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10374) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10375) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10378) aux = &env->insn_aux_data[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10379) if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10380) addr = (unsigned long)map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10381) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10382) u32 off = insn[1].imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10384) if (off >= BPF_MAX_VAR_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10385) verbose(env, "direct value offset of %u is not allowed\n", off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10386) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10387) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10390) if (!map->ops->map_direct_value_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10391) verbose(env, "no direct value access support for this map type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10392) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10393) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10396) err = map->ops->map_direct_value_addr(map, &addr, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10397) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10398) verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10399) map->value_size, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10400) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10401) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10404) aux->map_off = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10405) addr += off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10408) insn[0].imm = (u32)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10409) insn[1].imm = addr >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10411) /* check whether we recorded this map already */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10412) for (j = 0; j < env->used_map_cnt; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10413) if (env->used_maps[j] == map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10414) aux->map_index = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10415) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10416) goto next_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10420) if (env->used_map_cnt >= MAX_USED_MAPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10421) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10422) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10425) /* Hold the map. If the program is rejected by the verifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10426) * the map will be released by release_maps(); otherwise it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10427) * will be used by the valid program until it's unloaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10428) * and all maps are released in free_used_maps().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10430) bpf_map_inc(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10432) aux->map_index = env->used_map_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10433) env->used_maps[env->used_map_cnt++] = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10435) if (bpf_map_is_cgroup_storage(map) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10436) bpf_cgroup_storage_assign(env->prog->aux, map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10437) verbose(env, "only one cgroup storage of each type is allowed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10438) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10439) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10442) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10443) next_insn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10444) insn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10445) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10446) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10449) /* Basic sanity check before we invest more work here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10450) if (!bpf_opcode_in_insntable(insn->code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10451) verbose(env, "unknown opcode %02x\n", insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10452) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10456) /* now all pseudo BPF_LD_IMM64 instructions load valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10457) * 'struct bpf_map *' into a register instead of user map_fd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10458) * These pointers will be used later by verifier to validate map access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10460) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10461) }
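/* A minimal sketch of the two map-related ldimm64 forms resolved above
 * (field values are illustrative):
 *
 *   BPF_PSEUDO_MAP_FD:    insn[0].imm = map fd, insn[1].imm must be 0;
 *                         rewritten to the 64-bit 'struct bpf_map *'.
 *   BPF_PSEUDO_MAP_VALUE: insn[0].imm = map fd, insn[1].imm = offset into
 *                         the value; rewritten to the direct value address
 *                         plus that offset via ->map_direct_value_addr().
 *
 * From the program side, BPF_LD_MAP_FD(BPF_REG_1, fd) is one way the
 * first form is typically emitted.
 */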
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10463) /* drop refcnt of maps used by the rejected program */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10464) static void release_maps(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10466) __bpf_free_used_maps(env->prog->aux, env->used_maps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10467) env->used_map_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10470) /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10471) static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10473) struct bpf_insn *insn = env->prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10474) int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10475) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10477) for (i = 0; i < insn_cnt; i++, insn++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10478) if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10479) insn->src_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10482) /* single env->prog->insnsi[off] instruction was replaced with the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10483) * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10484) * [0, off) and [off, end) to new locations, so the patched range stays zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10486) static void adjust_insn_aux_data(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10487) struct bpf_insn_aux_data *new_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10488) struct bpf_prog *new_prog, u32 off, u32 cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10490) struct bpf_insn_aux_data *old_data = env->insn_aux_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10491) struct bpf_insn *insn = new_prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10492) u32 old_seen = old_data[off].seen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10493) u32 prog_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10494) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10496) /* aux info at OFF always needs adjustment, no matter whether the fast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10497) * path (cnt == 1) is taken or not. There is no guarantee that the insn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10498) * at OFF is the original insn of the old prog.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10499) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10500) old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10502) if (cnt == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10503) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10504) prog_len = new_prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10506) memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10507) memcpy(new_data + off + cnt - 1, old_data + off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10508) sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10509) for (i = off; i < off + cnt - 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10510) /* Expand insnsi[off]'s seen count to the patched range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10511) new_data[i].seen = old_seen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10512) new_data[i].zext_dst = insn_has_def32(env, insn + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10514) env->insn_aux_data = new_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10515) vfree(old_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10516) }
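/* Worked example (hypothetical numbers): patching the single insn at
 * off = 5 with a cnt = 3 patch grows the program by 2. new_data[0..4]
 * is copied from old_data[0..4] and new_data[7..] from old_data[5..],
 * so the last patched slot (off + cnt - 1 = 7) inherits the original
 * insn's aux data, while the freshly inserted slots 5 and 6 only get
 * the old 'seen' mark and a recomputed zext_dst.
 */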
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10518) static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10520) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10522) if (len == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10523) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10524) /* NOTE: fake 'exit' subprog should be updated as well. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10525) for (i = 0; i <= env->subprog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10526) if (env->subprog_info[i].start <= off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10527) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10528) env->subprog_info[i].start += len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10530) }
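/* Example (hypothetical offsets): replacing one insn at off = 10 with a
 * len = 3 patch grows the program by 2, so every subprog whose start is
 * greater than 10, including the fake 'exit' subprog at index
 * subprog_cnt, has its start shifted by len - 1 = 2.
 */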
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10532) static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10534) struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10535) int i, sz = prog->aux->size_poke_tab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10536) struct bpf_jit_poke_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10538) for (i = 0; i < sz; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10539) desc = &tab[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10540) if (desc->insn_idx <= off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10541) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10542) desc->insn_idx += len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10546) static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10547) const struct bpf_insn *patch, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10549) struct bpf_prog *new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10550) struct bpf_insn_aux_data *new_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10552) if (len > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10553) new_data = vzalloc(array_size(env->prog->len + len - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10554) sizeof(struct bpf_insn_aux_data)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10555) if (!new_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10556) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10559) new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10560) if (IS_ERR(new_prog)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10561) if (PTR_ERR(new_prog) == -ERANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10562) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10563) "insn %d cannot be patched due to 16-bit range\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10564) env->insn_aux_data[off].orig_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10565) vfree(new_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10566) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10568) adjust_insn_aux_data(env, new_data, new_prog, off, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10569) adjust_subprog_starts(env, off, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10570) adjust_poke_descs(new_prog, off, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10571) return new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10572) }
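/* Typical usage sketch (patch contents are illustrative): callers build
 * a small patch array and treat a NULL return as -ENOMEM, e.g.
 *
 *   struct bpf_insn patch[] = {
 *           BPF_MOV64_IMM(BPF_REG_0, 0),
 *           BPF_EXIT_INSN(),
 *   };
 *   new_prog = bpf_patch_insn_data(env, i, patch, ARRAY_SIZE(patch));
 *   if (!new_prog)
 *           return -ENOMEM;
 *   env->prog = new_prog;
 */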
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10574) static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10575) u32 off, u32 cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10577) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10579) /* find first prog starting at or after off (first to remove) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10580) for (i = 0; i < env->subprog_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10581) if (env->subprog_info[i].start >= off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10582) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10583) /* find first prog starting at or after off + cnt (first to stay) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10584) for (j = i; j < env->subprog_cnt; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10585) if (env->subprog_info[j].start >= off + cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10586) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10587) /* if subprog j doesn't start exactly at off + cnt, we are only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10588) * removing the front of the previous subprog
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10589) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10590) if (env->subprog_info[j].start != off + cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10591) j--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10593) if (j > i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10594) struct bpf_prog_aux *aux = env->prog->aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10595) int move;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10597) /* move fake 'exit' subprog as well */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10598) move = env->subprog_cnt + 1 - j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10600) memmove(env->subprog_info + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10601) env->subprog_info + j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10602) sizeof(*env->subprog_info) * move);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10603) env->subprog_cnt -= j - i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10605) /* remove func_info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10606) if (aux->func_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10607) move = aux->func_info_cnt - j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10609) memmove(aux->func_info + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10610) aux->func_info + j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10611) sizeof(*aux->func_info) * move);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10612) aux->func_info_cnt -= j - i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10613) /* func_info->insn_off is set after all code rewrites,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10614) * in adjust_btf_func() - no need to adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10615) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10617) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10618) /* convert i from "first prog to remove" to "first to adjust" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10619) if (env->subprog_info[i].start == off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10620) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10623) /* update fake 'exit' subprog as well */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10624) for (; i <= env->subprog_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10625) env->subprog_info[i].start -= cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10627) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10628) }
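/* Worked example (hypothetical layout): with subprog starts {0, 8, 16}
 * and a removal of insns [8, 16), i = 1 and j = 2. Subprog 1 is dropped,
 * the remaining entries (including the fake 'exit' one) are moved up by
 * memmove(), and every surviving start from index i onward is then
 * pulled back by cnt.
 */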
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10630) static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10631) u32 cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10633) struct bpf_prog *prog = env->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10634) u32 i, l_off, l_cnt, nr_linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10635) struct bpf_line_info *linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10637) nr_linfo = prog->aux->nr_linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10638) if (!nr_linfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10639) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10641) linfo = prog->aux->linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10643) /* find first line info to remove, count lines to be removed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10644) for (i = 0; i < nr_linfo; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10645) if (linfo[i].insn_off >= off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10646) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10648) l_off = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10649) l_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10650) for (; i < nr_linfo; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10651) if (linfo[i].insn_off < off + cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10652) l_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10653) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10654) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10656) /* If the first live insn doesn't match the first live linfo, it needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10657) * "inherit" the last removed linfo. prog is already modified, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10658) * prog->len == off means no live instructions remain (tail was removed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10660) if (prog->len != off && l_cnt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10661) (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10662) l_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10663) linfo[--i].insn_off = off + cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10666) /* remove the line info entries which refer to the removed instructions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10667) if (l_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10668) memmove(linfo + l_off, linfo + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10669) sizeof(*linfo) * (nr_linfo - i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10671) prog->aux->nr_linfo -= l_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10672) nr_linfo = prog->aux->nr_linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10675) /* pull all linfo[i].insn_off >= off + cnt in by cnt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10676) for (i = l_off; i < nr_linfo; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10677) linfo[i].insn_off -= cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10679) /* fix up all subprogs (incl. 'exit') which start >= off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10680) for (i = 0; i <= env->subprog_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10681) if (env->subprog_info[i].linfo_idx > l_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10682) /* the subprog may have started in the removed region but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10683) * may not be fully removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10685) if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10686) env->subprog_info[i].linfo_idx -= l_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10687) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10688) env->subprog_info[i].linfo_idx = l_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10691) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10692) }
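/* Worked example (hypothetical offsets): with line info at insn offsets
 * {0, 4, 10} and a removal of insns [3, 9), l_off = 1 and l_cnt = 1.
 * Because the first live insn after the hole (old offset 9) has no linfo
 * of its own, the entry at 4 is kept and re-pointed to off + cnt = 9
 * instead of being removed, and the entries from l_off onward are then
 * pulled back by cnt, giving {0, 3, 4}.
 */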
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10694) static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10696) struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10697) unsigned int orig_prog_len = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10698) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10700) if (bpf_prog_is_dev_bound(env->prog->aux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10701) bpf_prog_offload_remove_insns(env, off, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10703) err = bpf_remove_insns(env->prog, off, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10704) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10705) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10707) err = adjust_subprog_starts_after_remove(env, off, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10708) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10709) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10711) err = bpf_adj_linfo_after_remove(env, off, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10712) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10713) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10715) memmove(aux_data + off, aux_data + off + cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10716) sizeof(*aux_data) * (orig_prog_len - off - cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10718) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10721) /* The verifier does more data flow analysis than llvm and will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10722) * explore branches that are dead at run time. Malicious programs can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10723) * have dead code too. Therefore replace all dead at-run-time code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10724) * with 'ja -1'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10726) * Just nops are not optimal, e.g. if they would sit at the end of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10727) * program and through another bug we would manage to jump there, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10728) * we'd execute beyond program memory otherwise. Returning exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10729) * code also wouldn't work since we can have subprogs where the dead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10730) * code could be located.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10731) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10732) static void sanitize_dead_code(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10734) struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10735) struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10736) struct bpf_insn *insn = env->prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10737) const int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10738) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10740) for (i = 0; i < insn_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10741) if (aux_data[i].seen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10742) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10743) memcpy(insn + i, &trap, sizeof(trap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10744) aux_data[i].zext_dst = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10747)
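/* Return true if the opcode is a conditional jump: every BPF_JMP32
 * opcode and any BPF_JMP opcode other than JA, EXIT and CALL.
 */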
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10748) static bool insn_is_cond_jump(u8 code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10750) u8 op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10752) if (BPF_CLASS(code) == BPF_JMP32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10753) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10755) if (BPF_CLASS(code) != BPF_JMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10756) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10758) op = BPF_OP(code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10759) return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10761)
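/* Rewrite conditional jumps for which one side (the fall-through insn or
 * the jump target) was never visited by the verifier into unconditional
 * jumps; the now-dead side can then be removed as unreachable code.
 */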
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10762) static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10764) struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10765) struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10766) struct bpf_insn *insn = env->prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10767) const int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10768) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10770) for (i = 0; i < insn_cnt; i++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10771) if (!insn_is_cond_jump(insn->code))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10772) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10773)
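		/* If the fall-through insn was never seen, the branch is
		 * always taken; if the jump target was never seen, it is
		 * never taken. Hard-wire the insn to 'ja off' resp. 'ja +0'.
		 */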
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10774) if (!aux_data[i + 1].seen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10775) ja.off = insn->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10776) else if (!aux_data[i + 1 + insn->off].seen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10777) ja.off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10778) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10779) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10781) if (bpf_prog_is_dev_bound(env->prog->aux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10782) bpf_prog_offload_replace_insn(env, i, &ja);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10784) memcpy(insn, &ja, sizeof(ja));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10787)
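/* Remove every maximal run of instructions that was never marked as
 * seen during verification.
 */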
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10788) static int opt_remove_dead_code(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10790) struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10791) int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10792) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10794) for (i = 0; i < insn_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10795) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10797) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10798) while (i + j < insn_cnt && !aux_data[i + j].seen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10799) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10800) if (!j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10801) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10803) err = verifier_remove_insns(env, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10804) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10805) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10806) insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10809) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10811)
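/* Drop 'ja +0' no-op jumps, e.g. those left behind when
 * opt_hard_wire_dead_code_branches() turned a never-taken conditional
 * jump into a fall-through.
 */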
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10812) static int opt_remove_nops(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10814) const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10815) struct bpf_insn *insn = env->prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10816) int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10817) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10819) for (i = 0; i < insn_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10820) if (memcmp(&insn[i], &ja, sizeof(ja)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10821) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10823) err = verifier_remove_insns(env, i, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10824) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10825) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10826) insn_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10827) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10830) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10832)
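/* Two transformations driven by the per-insn zext_dst flag: insns that
 * need their 32-bit destination zero-extended get an explicit zext
 * appended when the JIT asks for it via bpf_jit_needs_zext(); with
 * BPF_F_TEST_RND_HI32, the remaining 32-bit defs get their upper 32
 * bits poisoned with a random value to flush out code that wrongly
 * assumes they are zero.
 */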
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10833) static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10834) const union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10836) struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10837) struct bpf_insn_aux_data *aux = env->insn_aux_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10838) int i, patch_len, delta = 0, len = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10839) struct bpf_insn *insns = env->prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10840) struct bpf_prog *new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10841) bool rnd_hi32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10843) rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10844) zext_patch[1] = BPF_ZEXT_REG(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10845) rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10846) rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10847) rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10848) for (i = 0; i < len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10849) int adj_idx = i + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10850) struct bpf_insn insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10852) insn = insns[adj_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10853) if (!aux[adj_idx].zext_dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10854) u8 code, class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10855) u32 imm_rnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10857) if (!rnd_hi32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10858) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10860) code = insn.code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10861) class = BPF_CLASS(code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10862) if (insn_no_def(&insn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10863) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10865) /* NOTE: arg "reg" (the fourth one) is only used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10866) * BPF_STX which has been ruled out in above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10867) * check, it is safe to pass NULL here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10869) if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10870) if (class == BPF_LD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10871) BPF_MODE(code) == BPF_IMM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10872) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10873) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10876) /* ctx load could be transformed into wider load. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10877) if (class == BPF_LDX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10878) aux[adj_idx].ptr_type == PTR_TO_CTX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10879) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10881) imm_rnd = get_random_int();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10882) rnd_hi32_patch[0] = insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10883) rnd_hi32_patch[1].imm = imm_rnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10884) rnd_hi32_patch[3].dst_reg = insn.dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10885) patch = rnd_hi32_patch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10886) patch_len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10887) goto apply_patch_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10890) if (!bpf_jit_needs_zext())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10891) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10893) zext_patch[0] = insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10894) zext_patch[1].dst_reg = insn.dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10895) zext_patch[1].src_reg = insn.dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10896) patch = zext_patch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10897) patch_len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10898) apply_patch_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10899) new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10900) if (!new_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10901) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10902) env->prog = new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10903) insns = new_prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10904) aux = env->insn_aux_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10905) delta += patch_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10908) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10911) /* convert load instructions that access fields of a context type into a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10912) * sequence of instructions that access fields of the underlying structure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10913) * struct __sk_buff -> struct sk_buff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10914) * struct bpf_sock_ops -> struct sock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10916) static int convert_ctx_accesses(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10918) const struct bpf_verifier_ops *ops = env->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10919) int i, cnt, size, ctx_field_size, delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10920) const int insn_cnt = env->prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10921) struct bpf_insn insn_buf[16], *insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10922) u32 target_size, size_default, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10923) struct bpf_prog *new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10924) enum bpf_access_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10925) bool is_narrower_load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10927) if (ops->gen_prologue || env->seen_direct_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10928) if (!ops->gen_prologue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10929) verbose(env, "bpf verifier is misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10930) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10932) cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10933) env->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10934) if (cnt >= ARRAY_SIZE(insn_buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10935) verbose(env, "bpf verifier is misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10936) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10937) } else if (cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10938) new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10939) if (!new_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10940) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10942) env->prog = new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10943) delta += cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10947) if (bpf_prog_is_dev_bound(env->prog->aux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10950) insn = env->prog->insnsi + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10952) for (i = 0; i < insn_cnt; i++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10953) bpf_convert_ctx_access_t convert_ctx_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10954) bool ctx_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10955)
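		/* Classify the access: LDX is a candidate ctx read and STX a
		 * candidate ctx write; plain ST stores are only matched so
		 * that the nospec stack-spill patch below can cover them.
		 */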
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10956) if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10957) insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10958) insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10959) insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10960) type = BPF_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10961) ctx_access = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10962) } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10963) insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10964) insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10965) insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10966) insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10967) insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10968) insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10969) insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10970) type = BPF_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10971) ctx_access = BPF_CLASS(insn->code) == BPF_STX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10972) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10973) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10976) if (type == BPF_WRITE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10977) env->insn_aux_data[i + delta].sanitize_stack_spill) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10978) struct bpf_insn patch[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10979) *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10980) BPF_ST_NOSPEC(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10981) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10983) cnt = ARRAY_SIZE(patch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10984) new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10985) if (!new_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10986) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10988) delta += cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10989) env->prog = new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10990) insn = new_prog->insnsi + i + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10991) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10994) if (!ctx_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10995) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10997) switch (env->insn_aux_data[i + delta].ptr_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10998) case PTR_TO_CTX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10999) if (!ops->convert_ctx_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11000) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11001) convert_ctx_access = ops->convert_ctx_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11003) case PTR_TO_SOCKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11004) case PTR_TO_SOCK_COMMON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11005) convert_ctx_access = bpf_sock_convert_ctx_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11006) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11007) case PTR_TO_TCP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11008) convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11009) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11010) case PTR_TO_XDP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11011) convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11013) case PTR_TO_BTF_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11014) if (type == BPF_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11015) insn->code = BPF_LDX | BPF_PROBE_MEM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11016) BPF_SIZE((insn)->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11017) env->prog->aux->num_exentries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11018) } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11019) verbose(env, "Writes through BTF pointers are not allowed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11020) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11022) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11023) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11024) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11027) ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11028) size = BPF_LDST_BYTES(insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11030) /* If the read access is a narrower load of the field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11031) * convert to a 4/8-byte load, to minimum program type specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11032) * convert_ctx_access changes. If conversion is successful,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11033) * we will apply proper mask to the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11035) is_narrower_load = size < ctx_field_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11036) size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11037) off = insn->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11038) if (is_narrower_load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11039) u8 size_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11041) if (type == BPF_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11042) verbose(env, "bpf verifier narrow ctx access misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11043) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11046) size_code = BPF_H;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11047) if (ctx_field_size == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11048) size_code = BPF_W;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11049) else if (ctx_field_size == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11050) size_code = BPF_DW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11052) insn->off = off & ~(size_default - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11053) insn->code = BPF_LDX | BPF_MEM | size_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11056) target_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11057) cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11058) &target_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11059) if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11060) (ctx_field_size && !target_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11061) verbose(env, "bpf verifier is misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11062) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11064)
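		/* For a narrow load, shift the wider value loaded above
		 * down to the requested sub-field and mask it to 'size'
		 * bytes.
		 */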
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11065) if (is_narrower_load && size < target_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11066) u8 shift = bpf_ctx_narrow_access_offset(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11067) off, size, size_default) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11068) if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11069) verbose(env, "bpf verifier narrow ctx load misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11070) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11072) if (ctx_field_size <= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11073) if (shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11074) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11075) insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11076) shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11077) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11078) (1 << size * 8) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11079) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11080) if (shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11081) insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11082) insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11083) shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11084) insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11085) (1ULL << size * 8) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11089) new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11090) if (!new_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11091) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11093) delta += cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11095) /* keep walking new program and skip insns we just inserted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11096) env->prog = new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11097) insn = new_prog->insnsi + i + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11100) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11102)
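/* Split the program at subprog boundaries and JIT every function
 * separately, then patch all bpf-to-bpf calls with the final addresses
 * and run a second JIT pass over each function.
 */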
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11103) static int jit_subprogs(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11105) struct bpf_prog *prog = env->prog, **func, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11106) int i, j, subprog_start, subprog_end = 0, len, subprog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11107) struct bpf_map *map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11108) struct bpf_insn *insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11109) void *old_bpf_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11110) int err, num_exentries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11112) if (env->subprog_cnt <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11113) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11115) for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11116) if (insn->code != (BPF_JMP | BPF_CALL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11117) insn->src_reg != BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11118) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11119) /* Upon error here we cannot fall back to interpreter but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11120) * need a hard reject of the program. Thus -EFAULT is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11121) * propagated in any case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11122) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11123) subprog = find_subprog(env, i + insn->imm + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11124) if (subprog < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11125) WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11126) i + insn->imm + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11127) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11129) /* temporarily remember subprog id inside insn instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11130) * aux_data, since next loop will split up all insns into funcs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11132) insn->off = subprog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11133) /* remember original imm in case JIT fails and fallback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11134) * to interpreter will be needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11136) env->insn_aux_data[i].call_imm = insn->imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11137) /* point imm to __bpf_call_base+1 from JITs point of view */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11138) insn->imm = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11141) err = bpf_prog_alloc_jited_linfo(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11142) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11143) goto out_undo_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11145) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11146) func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11147) if (!func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11148) goto out_undo_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11150) for (i = 0; i < env->subprog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11151) subprog_start = subprog_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11152) subprog_end = env->subprog_info[i + 1].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11154) len = subprog_end - subprog_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11155) /* BPF_PROG_RUN doesn't call subprogs directly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11156) * hence main prog stats include the runtime of subprogs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11157) * subprogs don't have IDs and not reachable via prog_get_next_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11158) * func[i]->aux->stats will never be accessed and stays NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11160) func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11161) if (!func[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11162) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11163) memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11164) len * sizeof(struct bpf_insn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11165) func[i]->type = prog->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11166) func[i]->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11167) if (bpf_prog_calc_tag(func[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11168) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11169) func[i]->is_func = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11170) func[i]->aux->func_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11171) /* the btf and func_info will be freed only at prog->aux */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11172) func[i]->aux->btf = prog->aux->btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11173) func[i]->aux->func_info = prog->aux->func_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11175) for (j = 0; j < prog->aux->size_poke_tab; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11176) u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11177) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11179) if (!(insn_idx >= subprog_start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11180) insn_idx <= subprog_end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11181) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11183) ret = bpf_jit_add_poke_descriptor(func[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11184) &prog->aux->poke_tab[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11185) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11186) verbose(env, "adding tail call poke descriptor failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11187) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11190) func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11192) map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11193) ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11194) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11195) verbose(env, "tracking tail call prog failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11196) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11200) /* Use bpf_prog_F_tag to indicate functions in stack traces.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11201) * Long term would need debug info to populate names
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11202) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11203) func[i]->aux->name[0] = 'F';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11204) func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11205) func[i]->jit_requested = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11206) func[i]->aux->linfo = prog->aux->linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11207) func[i]->aux->nr_linfo = prog->aux->nr_linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11208) func[i]->aux->jited_linfo = prog->aux->jited_linfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11209) func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11210) num_exentries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11211) insn = func[i]->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11212) for (j = 0; j < func[i]->len; j++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11213) if (BPF_CLASS(insn->code) == BPF_LDX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11214) BPF_MODE(insn->code) == BPF_PROBE_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11215) num_exentries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11217) func[i]->aux->num_exentries = num_exentries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11218) func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11219) func[i] = bpf_int_jit_compile(func[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11220) if (!func[i]->jited) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11221) err = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11222) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11224) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11227) /* Untrack main program's aux structs so that during map_poke_run()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11228) * we will not stumble upon the unfilled poke descriptors; each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11229) * of the main program's poke descs got distributed across subprogs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11230) * and got tracked onto map, so we are sure that none of them will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11231) * be missed after the operation below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11232) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11233) for (i = 0; i < prog->aux->size_poke_tab; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11234) map_ptr = prog->aux->poke_tab[i].tail_call.map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11236) map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11239) /* at this point all bpf functions were successfully JITed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11240) * now populate all bpf_calls with correct addresses and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11241) * run last pass of JIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11243) for (i = 0; i < env->subprog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11244) insn = func[i]->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11245) for (j = 0; j < func[i]->len; j++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11246) if (insn->code != (BPF_JMP | BPF_CALL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11247) insn->src_reg != BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11248) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11249) subprog = insn->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11250) insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11251) __bpf_call_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11254) /* we use the aux data to keep a list of the start addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11255) * of the JITed images for each function in the program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11256) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11257) * for some architectures, such as powerpc64, the imm field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11258) * might not be large enough to hold the offset of the start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11259) * address of the callee's JITed image from __bpf_call_base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11260) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11261) * in such cases, we can lookup the start address of a callee
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11262) * by using its subprog id, available from the off field of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11263) * the call instruction, as an index for this list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11265) func[i]->aux->func = func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11266) func[i]->aux->func_cnt = env->subprog_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11268) for (i = 0; i < env->subprog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11269) old_bpf_func = func[i]->bpf_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11270) tmp = bpf_int_jit_compile(func[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11271) if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11272) verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11273) err = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11274) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11276) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11279) /* finally lock prog and jit images for all functions and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11280) * populate kallsysm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11282) for (i = 0; i < env->subprog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11283) bpf_prog_lock_ro(func[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11284) bpf_prog_kallsyms_add(func[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11287) /* Last step: make now unused interpreter insns from main
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11288) * prog consistent for later dump requests, so they can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11289) * later look the same as if they were interpreted only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11291) for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11292) if (insn->code != (BPF_JMP | BPF_CALL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11293) insn->src_reg != BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11294) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11295) insn->off = env->insn_aux_data[i].call_imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11296) subprog = find_subprog(env, i + insn->off + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11297) insn->imm = subprog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11300) prog->jited = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11301) prog->bpf_func = func[0]->bpf_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11302) prog->aux->func = func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11303) prog->aux->func_cnt = env->subprog_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11304) bpf_prog_free_unused_jited_linfo(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11305) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11306) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11307) for (i = 0; i < env->subprog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11308) if (!func[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11309) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11311) for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11312) map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11313) map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11315) bpf_jit_free(func[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11317) kfree(func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11318) out_undo_insn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11319) /* cleanup main prog to be interpreted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11320) prog->jit_requested = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11321) for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11322) if (insn->code != (BPF_JMP | BPF_CALL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11323) insn->src_reg != BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11324) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11325) insn->off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11326) insn->imm = env->insn_aux_data[i].call_imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11328) bpf_prog_free_jited_linfo(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11329) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11331)
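/* Resolve BPF_PSEUDO_CALL instructions: JIT all subprogs when possible;
 * otherwise fall back to patching the calls so the interpreter can
 * handle them.
 */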
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11332) static int fixup_call_args(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11334) #ifndef CONFIG_BPF_JIT_ALWAYS_ON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11335) struct bpf_prog *prog = env->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11336) struct bpf_insn *insn = prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11337) int i, depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11338) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11339) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11341) if (env->prog->jit_requested &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11342) !bpf_prog_is_dev_bound(env->prog->aux)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11343) err = jit_subprogs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11344) if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11345) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11346) if (err == -EFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11347) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11349) #ifndef CONFIG_BPF_JIT_ALWAYS_ON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11350) if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11351) /* When JIT fails the progs with bpf2bpf calls and tail_calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11352) * have to be rejected, since interpreter doesn't support them yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11354) verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11355) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11357) for (i = 0; i < prog->len; i++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11358) if (insn->code != (BPF_JMP | BPF_CALL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11359) insn->src_reg != BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11360) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11361) depth = get_callee_stack_depth(env, insn, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11362) if (depth < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11363) return depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11364) bpf_patch_call_args(insn, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11366) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11367) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11368) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11371) /* fixup insn->imm field of bpf_call instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11372) * and inline eligible helpers as explicit sequence of BPF instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11373) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11374) * this function is called after eBPF program passed verification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11375) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11376) static int fixup_bpf_calls(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11378) struct bpf_prog *prog = env->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11379) bool expect_blinding = bpf_jit_blinding_enabled(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11380) struct bpf_insn *insn = prog->insnsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11381) const struct bpf_func_proto *fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11382) const int insn_cnt = prog->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11383) const struct bpf_map_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11384) struct bpf_insn_aux_data *aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11385) struct bpf_insn insn_buf[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11386) struct bpf_prog *new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11387) struct bpf_map *map_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11388) int i, ret, cnt, delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11390) for (i = 0; i < insn_cnt; i++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11391) if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11392) insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11393) insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11394) insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11395) bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11396) bool isdiv = BPF_OP(insn->code) == BPF_DIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11397) struct bpf_insn *patchlet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11398) struct bpf_insn chk_and_div[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11399) /* [R,W]x div 0 -> 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11400) BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11401) BPF_JNE | BPF_K, insn->src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11402) 0, 2, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11403) BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11404) BPF_JMP_IMM(BPF_JA, 0, 0, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11405) *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11406) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11407) struct bpf_insn chk_and_mod[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11408) /* [R,W]x mod 0 -> [R,W]x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11409) BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11410) BPF_JEQ | BPF_K, insn->src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11411) 0, 1 + (is64 ? 0 : 1), 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11412) *insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11413) BPF_JMP_IMM(BPF_JA, 0, 0, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11414) BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11415) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11417) patchlet = isdiv ? chk_and_div : chk_and_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11418) cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11419) ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11421) new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11422) if (!new_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11423) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11425) delta += cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11426) env->prog = prog = new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11427) insn = new_prog->insnsi + i + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11428) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11431) if (BPF_CLASS(insn->code) == BPF_LD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11432) (BPF_MODE(insn->code) == BPF_ABS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11433) BPF_MODE(insn->code) == BPF_IND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11434) cnt = env->ops->gen_ld_abs(insn, insn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11435) if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11436) verbose(env, "bpf verifier is misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11437) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11440) new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11441) if (!new_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11442) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11444) delta += cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11445) env->prog = prog = new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11446) insn = new_prog->insnsi + i + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11447) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11449)
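		/* Pointer arithmetic that the verifier flagged for
		 * sanitization (aux->alu_state): clamp the offset operand
		 * against aux->alu_limit via BPF_REG_AX so that an
		 * out-of-bounds offset collapses to zero, even under
		 * speculative execution.
		 */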
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11450) if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11451) insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11452) const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11453) const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11454) struct bpf_insn insn_buf[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11455) struct bpf_insn *patch = &insn_buf[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11456) bool issrc, isneg, isimm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11457) u32 off_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11459) aux = &env->insn_aux_data[i + delta];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11460) if (!aux->alu_state ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11461) aux->alu_state == BPF_ALU_NON_POINTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11462) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11464) isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11465) issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11466) BPF_ALU_SANITIZE_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11467) isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11469) off_reg = issrc ? insn->src_reg : insn->dst_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11470) if (isimm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11471) *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11472) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11473) if (isneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11474) *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11475) *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11476) *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11477) *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11478) *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11479) *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11480) *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11482) if (!issrc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11483) *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11484) insn->src_reg = BPF_REG_AX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11485) if (isneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11486) insn->code = insn->code == code_add ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11487) code_sub : code_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11488) *patch++ = *insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11489) if (issrc && isneg && !isimm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11490) *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11491) cnt = patch - insn_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11493) new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11494) if (!new_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11495) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11497) delta += cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11498) env->prog = prog = new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11499) insn = new_prog->insnsi + i + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11500) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11502)
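		/* The rest of this loop rewrites helper calls only; calls to
		 * BPF subprograms (BPF_PSEUDO_CALL) are handled separately.
		 */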
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11503) if (insn->code != (BPF_JMP | BPF_CALL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11504) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11505) if (insn->src_reg == BPF_PSEUDO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11506) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11507)
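		/* Some helpers imply program-wide flags or one-time setup
		 * (e.g. routing dst tracking, PRNG seeding, kprobe override).
		 */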
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11508) if (insn->imm == BPF_FUNC_get_route_realm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11509) prog->dst_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11510) if (insn->imm == BPF_FUNC_get_prandom_u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11511) bpf_user_rnd_init_once();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11512) if (insn->imm == BPF_FUNC_override_return)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11513) prog->kprobe_override = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11514) if (insn->imm == BPF_FUNC_tail_call) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11515) /* If we tail call into other programs, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11516) * cannot make any assumptions since they can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11517) * be replaced dynamically during runtime in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11518) * the program array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11519) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11520) prog->cb_access = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11521) if (!allow_tail_call_in_subprogs(env))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11522) prog->aux->stack_depth = MAX_BPF_STACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11523) prog->aux->max_pkt_offset = MAX_PACKET_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11524)
			/* Mark bpf_tail_call as a different opcode to avoid a
			 * conditional branch in the interpreter for every normal
			 * call and to prevent accidental JITing by a JIT compiler
			 * that doesn't support bpf_tail_call yet.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11530) insn->imm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11531) insn->code = BPF_JMP | BPF_TAIL_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11533) aux = &env->insn_aux_data[i + delta];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11534) if (env->bpf_capable && !expect_blinding &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11535) prog->jit_requested &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11536) !bpf_map_key_poisoned(aux) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11537) !bpf_map_ptr_poisoned(aux) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11538) !bpf_map_ptr_unpriv(aux)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11539) struct bpf_jit_poke_descriptor desc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11540) .reason = BPF_POKE_REASON_TAIL_CALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11541) .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11542) .tail_call.key = bpf_map_key_immediate(aux),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11543) .insn_idx = i + delta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11544) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11546) ret = bpf_jit_add_poke_descriptor(prog, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11547) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11548) verbose(env, "adding tail call poke descriptor failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11549) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11552) insn->imm = ret + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11553) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11556) if (!bpf_map_ptr_unpriv(aux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11557) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11558)
			/* Instead of changing every JIT dealing with tail_call,
			 * emit two extra insns:
			 *   if (index >= max_entries) goto out;
			 *   index &= array->index_mask;
			 * to avoid out-of-bounds CPU speculation.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11565) if (bpf_map_ptr_poisoned(aux)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11566) verbose(env, "tail_call abusing map_ptr\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11567) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11570) map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11571) insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11572) map_ptr->max_entries, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11573) insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11574) container_of(map_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11575) struct bpf_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11576) map)->index_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11577) insn_buf[2] = *insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11578) cnt = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11579) new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11580) if (!new_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11581) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11583) delta += cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11584) env->prog = prog = new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11585) insn = new_prog->insnsi + i + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11586) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11588)
		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * and other inlining handlers are currently limited to 64-bit
		 * hosts only.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11593) if (prog->jit_requested && BITS_PER_LONG == 64 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11594) (insn->imm == BPF_FUNC_map_lookup_elem ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11595) insn->imm == BPF_FUNC_map_update_elem ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11596) insn->imm == BPF_FUNC_map_delete_elem ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11597) insn->imm == BPF_FUNC_map_push_elem ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11598) insn->imm == BPF_FUNC_map_pop_elem ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11599) insn->imm == BPF_FUNC_map_peek_elem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11600) aux = &env->insn_aux_data[i + delta];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11601) if (bpf_map_ptr_poisoned(aux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11602) goto patch_call_imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11604) map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11605) ops = map_ptr->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11606) if (insn->imm == BPF_FUNC_map_lookup_elem &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11607) ops->map_gen_lookup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11608) cnt = ops->map_gen_lookup(map_ptr, insn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11609) if (cnt == -EOPNOTSUPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11610) goto patch_map_ops_generic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11611) if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11612) verbose(env, "bpf verifier is misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11613) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11616) new_prog = bpf_patch_insn_data(env, i + delta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11617) insn_buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11618) if (!new_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11619) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11621) delta += cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11622) env->prog = prog = new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11623) insn = new_prog->insnsi + i + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11624) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11627) BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11628) (void *(*)(struct bpf_map *map, void *key))NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11629) BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11630) (int (*)(struct bpf_map *map, void *key))NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11631) BUILD_BUG_ON(!__same_type(ops->map_update_elem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11632) (int (*)(struct bpf_map *map, void *key, void *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11633) u64 flags))NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11634) BUILD_BUG_ON(!__same_type(ops->map_push_elem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11635) (int (*)(struct bpf_map *map, void *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11636) u64 flags))NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11637) BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11638) (int (*)(struct bpf_map *map, void *value))NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11639) BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11640) (int (*)(struct bpf_map *map, void *value))NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11641) patch_map_ops_generic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11642) switch (insn->imm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11643) case BPF_FUNC_map_lookup_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11644) insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11645) __bpf_call_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11646) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11647) case BPF_FUNC_map_update_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11648) insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11649) __bpf_call_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11650) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11651) case BPF_FUNC_map_delete_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11652) insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11653) __bpf_call_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11654) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11655) case BPF_FUNC_map_push_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11656) insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11657) __bpf_call_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11658) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11659) case BPF_FUNC_map_pop_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11660) insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11661) __bpf_call_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11662) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11663) case BPF_FUNC_map_peek_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11664) insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11665) __bpf_call_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11666) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11669) goto patch_call_imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11671)
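		/* Inline bpf_jiffies64() on 64-bit JITs as a direct load of the
		 * jiffies variable, roughly:
		 *   r0 = &jiffies           (ld_imm64)
		 *   r0 = *(u64 *)(r0 + 0)
		 */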
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11672) if (prog->jit_requested && BITS_PER_LONG == 64 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11673) insn->imm == BPF_FUNC_jiffies64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11674) struct bpf_insn ld_jiffies_addr[2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11675) BPF_LD_IMM64(BPF_REG_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11676) (unsigned long)&jiffies),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11677) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11679) insn_buf[0] = ld_jiffies_addr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11680) insn_buf[1] = ld_jiffies_addr[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11681) insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11682) BPF_REG_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11683) cnt = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11685) new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11686) cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11687) if (!new_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11688) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11690) delta += cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11691) env->prog = prog = new_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11692) insn = new_prog->insnsi + i + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11693) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11696) patch_call_imm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11697) fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* All functions that have a prototype and that the verifier
		 * allowed programs to call must be real in-kernel functions.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11701) if (!fn->func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11702) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11703) "kernel subsystem misconfigured func %s#%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11704) func_id_name(insn->imm), insn->imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11705) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11707) insn->imm = fn->func - __bpf_call_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11710) /* Since poke tab is now finalized, publish aux to tracker. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11711) for (i = 0; i < prog->aux->size_poke_tab; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11712) map_ptr = prog->aux->poke_tab[i].tail_call.map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11713) if (!map_ptr->ops->map_poke_track ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11714) !map_ptr->ops->map_poke_untrack ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11715) !map_ptr->ops->map_poke_run) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11716) verbose(env, "bpf verifier is misconfigured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11717) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11720) ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11721) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11722) verbose(env, "tracking tail call prog failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11723) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11727) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11729)
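/* Free all verifier states still held on the free list and in the
 * explored_states hash table, so that the next do_check_common() pass (or the
 * final teardown) starts from an empty state cache.
 */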
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11730) static void free_states(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11732) struct bpf_verifier_state_list *sl, *sln;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11733) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11735) sl = env->free_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11736) while (sl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11737) sln = sl->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11738) free_verifier_state(&sl->state, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11739) kfree(sl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11740) sl = sln;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11742) env->free_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11744) if (!env->explored_states)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11745) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11747) for (i = 0; i < state_htab_size(env); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11748) sl = env->explored_states[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11750) while (sl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11751) sln = sl->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11752) free_verifier_state(&sl->state, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11753) kfree(sl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11754) sl = sln;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11756) env->explored_states[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11759)
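/* Set up the initial frame and register state for the given subprog (0 for the
 * main program) and run the instruction-by-instruction walk in do_check().
 */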
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11760) static int do_check_common(struct bpf_verifier_env *env, int subprog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11762) bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11763) struct bpf_verifier_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11764) struct bpf_reg_state *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11765) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11767) env->prev_linfo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11768) env->pass_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11770) state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11771) if (!state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11772) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11773) state->curframe = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11774) state->speculative = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11775) state->branches = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11776) state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11777) if (!state->frame[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11778) kfree(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11779) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11781) env->cur_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11782) init_func_state(env, state->frame[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11783) BPF_MAIN_FUNC /* callsite */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11784) 0 /* frameno */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11785) subprog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11786)
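	/* Initialize the argument registers of frame 0: global functions and
	 * BPF_PROG_TYPE_EXT programs take their argument types from BTF,
	 * everything else gets the context pointer in R1.
	 */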
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11787) regs = state->frame[state->curframe]->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11788) if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11789) ret = btf_prepare_func_args(env, subprog, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11790) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11791) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11792) for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11793) if (regs[i].type == PTR_TO_CTX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11794) mark_reg_known_zero(env, regs, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11795) else if (regs[i].type == SCALAR_VALUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11796) mark_reg_unknown(env, regs, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11798) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11799) /* 1st arg to a function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11800) regs[BPF_REG_1].type = PTR_TO_CTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11801) mark_reg_known_zero(env, regs, BPF_REG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11802) ret = btf_check_func_arg_match(env, subprog, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11803) if (ret == -EFAULT)
			/* Unlikely verifier bug; abort.
			 * ret == 0 and ret < 0 are sadly acceptable for the
			 * main() function due to backward compatibility.
			 * E.g. a socket filter program may be written as:
			 *   int bpf_prog(struct pt_regs *ctx)
			 * and never dereference that ctx in the program.
			 * 'struct pt_regs' is a type mismatch for a socket
			 * filter, which should be using 'struct __sk_buff'.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11813) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11816) ret = do_check(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11817) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11818) /* check for NULL is necessary, since cur_state can be freed inside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11819) * do_check() under memory pressure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11821) if (env->cur_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11822) free_verifier_state(env->cur_state, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11823) env->cur_state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11825) while (!pop_stack(env, NULL, NULL, false));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11826) if (!ret && pop_log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11827) bpf_vlog_reset(&env->log, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11828) free_states(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11829) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11832) /* Verify all global functions in a BPF program one by one based on their BTF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11833) * All global functions must pass verification. Otherwise the whole program is rejected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11834) * Consider:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11835) * int bar(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11836) * int foo(int f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11837) * {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11838) * return bar(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11839) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11840) * int bar(int b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11841) * {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11842) * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11843) * }
 * foo() will be verified first for R1=any_scalar_value. During verification it
 * will be assumed that bar() has already been verified successfully, and the
 * call to bar() from foo() will be checked for a type match only. Later bar()
 * will be verified independently to check that it is safe for
 * R1=any_scalar_value.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11849) static int do_check_subprogs(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11851) struct bpf_prog_aux *aux = env->prog->aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11852) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11854) if (!aux->func_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11857) for (i = 1; i < env->subprog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11858) if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11859) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11860) env->insn_idx = env->subprog_info[i].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11861) WARN_ON_ONCE(env->insn_idx == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11862) ret = do_check_common(env, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11863) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11864) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11865) } else if (env->log.level & BPF_LOG_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11866) verbose(env,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11867) "Func#%d is safe for any args that match its prototype\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11868) i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11873)
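/* Verify the main (entry) subprogram and, on success, propagate its stack
 * depth to the program.
 */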
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11874) static int do_check_main(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11876) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11878) env->insn_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11879) ret = do_check_common(env, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11880) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11881) env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11882) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11886) static void print_verification_stats(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11888) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11890) if (env->log.level & BPF_LOG_STATS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11891) verbose(env, "verification time %lld usec\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11892) div_u64(env->verification_time, 1000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11893) verbose(env, "stack depth ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11894) for (i = 0; i < env->subprog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11895) u32 depth = env->subprog_info[i].stack_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11897) verbose(env, "%d", depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11898) if (i + 1 < env->subprog_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11899) verbose(env, "+");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11901) verbose(env, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11903) verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11904) "total_states %d peak_states %d mark_read %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11905) env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11906) env->max_states_per_insn, env->total_states,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11907) env->peak_states, env->longest_mark_read_walk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11909)
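/* For BPF_PROG_TYPE_STRUCT_OPS programs attach_btf_id names the struct_ops
 * type and expected_attach_type is the member index within it; resolve the
 * member's func proto and switch to the struct_ops verifier ops.
 */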
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11910) static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11912) const struct btf_type *t, *func_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11913) const struct bpf_struct_ops *st_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11914) const struct btf_member *member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11915) struct bpf_prog *prog = env->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11916) u32 btf_id, member_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11917) const char *mname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11919) if (!prog->gpl_compatible) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11920) verbose(env, "struct ops programs must have a GPL compatible license\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11921) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11924) btf_id = prog->aux->attach_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11925) st_ops = bpf_struct_ops_find(btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11926) if (!st_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11927) verbose(env, "attach_btf_id %u is not a supported struct\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11928) btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11929) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11932) t = st_ops->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11933) member_idx = prog->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11934) if (member_idx >= btf_type_vlen(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11935) verbose(env, "attach to invalid member idx %u of struct %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11936) member_idx, st_ops->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11937) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11940) member = &btf_type_member(t)[member_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11941) mname = btf_name_by_offset(btf_vmlinux, member->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11942) func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11943) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11944) if (!func_proto) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11945) verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11946) mname, member_idx, st_ops->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11947) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11950) if (st_ops->check_member) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11951) int err = st_ops->check_member(t, member);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11953) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11954) verbose(env, "attach to unsupported member %s of struct %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11955) mname, st_ops->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11956) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11960) prog->aux->attach_func_proto = func_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11961) prog->aux->attach_func_name = mname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11962) env->ops = st_ops->verifier_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11964) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11966) #define SECURITY_PREFIX "security_"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11967)
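/* fmod_ret attachment is only allowed to functions on the error injection
 * list or to security_*() LSM hooks.
 */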
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11968) static int check_attach_modify_return(unsigned long addr, const char *func_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11970) if (within_error_injection_list(addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11971) !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11972) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11974) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11976)
/* non-exhaustive list of sleepable bpf_lsm_*() functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11978) BTF_SET_START(btf_sleepable_lsm_hooks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11979) #ifdef CONFIG_BPF_LSM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11980) BTF_ID(func, bpf_lsm_bprm_committed_creds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11981) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11982) BTF_ID_UNUSED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11983) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11984) BTF_SET_END(btf_sleepable_lsm_hooks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11986) static int check_sleepable_lsm_hook(u32 btf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11988) return btf_id_set_contains(&btf_sleepable_lsm_hooks, btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11990)
/* list of non-sleepable functions that are otherwise on the
 * ALLOW_ERROR_INJECTION list
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11994) BTF_SET_START(btf_non_sleepable_error_inject)
/* The three functions below can be called from both sleepable and
 * non-sleepable context. Assume non-sleepable from the bpf safety point of
 * view.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11998) BTF_ID(func, __add_to_page_cache_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11999) BTF_ID(func, should_fail_alloc_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12000) BTF_ID(func, should_failslab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12001) BTF_SET_END(btf_non_sleepable_error_inject)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12003) static int check_non_sleepable_error_inject(u32 btf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12005) return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12007)
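/* Validate that @prog may attach to the target identified by @btf_id (either a
 * subprogram of @tgt_prog or a kernel function/tracepoint described by vmlinux
 * BTF) and fill @tgt_info with the target's address, name and type
 * information.
 */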
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12008) int bpf_check_attach_target(struct bpf_verifier_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12009) const struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12010) const struct bpf_prog *tgt_prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12011) u32 btf_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12012) struct bpf_attach_target_info *tgt_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12014) bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12015) const char prefix[] = "btf_trace_";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12016) int ret = 0, subprog = -1, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12017) const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12018) bool conservative = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12019) const char *tname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12020) struct btf *btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12021) long addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12023) if (!btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12024) bpf_log(log, "Tracing programs must provide btf_id\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12025) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12027) btf = tgt_prog ? tgt_prog->aux->btf : btf_vmlinux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12028) if (!btf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12029) bpf_log(log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12030) "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12031) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12033) t = btf_type_by_id(btf, btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12034) if (!t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12035) bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12036) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12038) tname = btf_name_by_offset(btf, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12039) if (!tname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12040) bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12041) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12043) if (tgt_prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12044) struct bpf_prog_aux *aux = tgt_prog->aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12046) for (i = 0; i < aux->func_info_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12047) if (aux->func_info[i].type_id == btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12048) subprog = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12051) if (subprog == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12052) bpf_log(log, "Subprog %s doesn't exist\n", tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12053) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12055) conservative = aux->func_info_aux[subprog].unreliable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12056) if (prog_extension) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12057) if (conservative) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12058) bpf_log(log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12059) "Cannot replace static functions\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12060) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12062) if (!prog->jit_requested) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12063) bpf_log(log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12064) "Extension programs should be JITed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12065) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12068) if (!tgt_prog->jited) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12069) bpf_log(log, "Can attach to only JITed progs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12070) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12072) if (tgt_prog->type == prog->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12073) /* Cannot fentry/fexit another fentry/fexit program.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12074) * Cannot attach program extension to another extension.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12075) * It's ok to attach fentry/fexit to extension program.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12077) bpf_log(log, "Cannot recursively attach\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12078) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12080) if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12081) prog_extension &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12082) (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12083) tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
			/* Program extensions can extend all program types
			 * except fentry/fexit. The reason is the following.
			 * The fentry/fexit programs are used for performance
			 * analysis and stats, and can be attached to any
			 * program type except themselves. When an extension
			 * program is replacing an XDP function, it is necessary
			 * to allow performance analysis of all functions: both
			 * the original XDP program and its program extension.
			 * Hence attaching fentry/fexit to BPF_PROG_TYPE_EXT is
			 * allowed. If extending fentry/fexit were allowed, it
			 * would be possible to create a long call chain
			 * fentry->extension->fentry->extension beyond a
			 * reasonable stack size. Hence extending fentry is not
			 * allowed.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12099) bpf_log(log, "Cannot extend fentry/fexit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12100) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12102) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12103) if (prog_extension) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12104) bpf_log(log, "Cannot replace kernel functions\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12105) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12109) switch (prog->expected_attach_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12110) case BPF_TRACE_RAW_TP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12111) if (tgt_prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12112) bpf_log(log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12113) "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12114) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12116) if (!btf_type_is_typedef(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12117) bpf_log(log, "attach_btf_id %u is not a typedef\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12118) btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12119) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12121) if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12122) bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12123) btf_id, tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12124) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12126) tname += sizeof(prefix) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12127) t = btf_type_by_id(btf, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12128) if (!btf_type_is_ptr(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12129) /* should never happen in valid vmlinux build */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12130) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12131) t = btf_type_by_id(btf, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12132) if (!btf_type_is_func_proto(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12133) /* should never happen in valid vmlinux build */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12134) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12137) case BPF_TRACE_ITER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12138) if (!btf_type_is_func(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12139) bpf_log(log, "attach_btf_id %u is not a function\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12140) btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12141) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12143) t = btf_type_by_id(btf, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12144) if (!btf_type_is_func_proto(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12145) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12146) ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12147) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12148) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12149) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12150) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12151) if (!prog_extension)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12152) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12153) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12154) case BPF_MODIFY_RETURN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12155) case BPF_LSM_MAC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12156) case BPF_TRACE_FENTRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12157) case BPF_TRACE_FEXIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12158) if (!btf_type_is_func(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12159) bpf_log(log, "attach_btf_id %u is not a function\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12160) btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12161) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12163) if (prog_extension &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12164) btf_check_type_match(log, prog, btf, t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12165) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12166) t = btf_type_by_id(btf, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12167) if (!btf_type_is_func_proto(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12168) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12170) if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12171) (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12172) prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12173) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12175) if (tgt_prog && conservative)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12176) t = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12178) ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12179) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12180) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12181)
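		/* Resolve the address to attach the trampoline to: the JITed
		 * (sub)program when attaching to another BPF program, otherwise
		 * the kernel symbol named by the BTF type.
		 */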
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12182) if (tgt_prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12183) if (subprog == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12184) addr = (long) tgt_prog->bpf_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12185) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12186) addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12187) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12188) addr = kallsyms_lookup_name(tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12189) if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12190) bpf_log(log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12191) "The address of function %s cannot be found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12192) tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12193) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12197) if (prog->aux->sleepable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12198) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12199) switch (prog->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12200) case BPF_PROG_TYPE_TRACING:
				/* fentry/fexit/fmod_ret progs can be sleepable only if they are
				 * attached to functions on the ALLOW_ERROR_INJECTION list and are
				 * not in the denylist.
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12204) if (!check_non_sleepable_error_inject(btf_id) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12205) within_error_injection_list(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12206) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12207) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12208) case BPF_PROG_TYPE_LSM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12209) /* LSM progs check that they are attached to bpf_lsm_*() funcs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12210) * Only some of them are sleepable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12212) if (check_sleepable_lsm_hook(btf_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12213) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12214) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12215) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12216) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12218) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12219) bpf_log(log, "%s is not sleepable\n", tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12220) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12222) } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12223) if (tgt_prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12224) bpf_log(log, "can't modify return codes of BPF programs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12225) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12227) ret = check_attach_modify_return(addr, tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12228) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12229) bpf_log(log, "%s() is not modifiable\n", tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12230) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12234) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12236) tgt_info->tgt_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12237) tgt_info->tgt_name = tname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12238) tgt_info->tgt_type = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12239) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12241)
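/* Top-level attach-time validation: dispatch struct_ops programs, resolve the
 * attach target for tracing/LSM/EXT programs, record the target info on the
 * program and acquire the corresponding trampoline.
 */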
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12242) static int check_attach_btf_id(struct bpf_verifier_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12244) struct bpf_prog *prog = env->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12245) struct bpf_prog *tgt_prog = prog->aux->dst_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12246) struct bpf_attach_target_info tgt_info = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12247) u32 btf_id = prog->aux->attach_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12248) struct bpf_trampoline *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12249) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12250) u64 key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12252) if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12253) prog->type != BPF_PROG_TYPE_LSM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12254) verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12255) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12258) if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12259) return check_struct_ops_btf_id(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12261) if (prog->type != BPF_PROG_TYPE_TRACING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12262) prog->type != BPF_PROG_TYPE_LSM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12263) prog->type != BPF_PROG_TYPE_EXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12264) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12266) ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12267) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12268) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12270) if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12271) /* to make freplace programs equivalent to their targets, they need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12272) * inherit env->ops and expected_attach_type for the rest of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12273) * verification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12275) env->ops = bpf_verifier_ops[tgt_prog->type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12276) prog->expected_attach_type = tgt_prog->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12277) }
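/* A minimal user-space sketch (libbpf API; the object, program and
 * function names below are hypothetical) of how an freplace program
 * selects the target whose ops and attach type are inherited above:
 *
 *   prog = bpf_object__find_program_by_name(obj, "freplace_prog");
 *   bpf_program__set_attach_target(prog, tgt_prog_fd, "tgt_func");
 *   err = bpf_object__load(obj);
 */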
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12279) /* store info about the attachment target that will be used later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12280) prog->aux->attach_func_proto = tgt_info.tgt_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12281) prog->aux->attach_func_name = tgt_info.tgt_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12283) if (tgt_prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12284) prog->aux->saved_dst_prog_type = tgt_prog->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12285) prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12287)
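/* raw_tp and iter programs attach through their own mechanisms
 * (tracepoint probes and bpf_iter links respectively), so they do not
 * need the trampoline set up below
 */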
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12288) if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12289) prog->aux->attach_btf_trace = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12290) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12291) } else if (prog->expected_attach_type == BPF_TRACE_ITER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12292) if (!bpf_iter_prog_supported(prog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12293) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12294) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12297) if (prog->type == BPF_PROG_TYPE_LSM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12298) ret = bpf_lsm_verify_prog(&env->log, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12299) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12300) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12302)
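/* all programs attached to the same target share one trampoline;
 * the key identifies that target
 */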
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12303) key = bpf_trampoline_compute_key(tgt_prog, btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12304) tr = bpf_trampoline_get(key, &tgt_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12305) if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12306) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12308) prog->aux->dst_trampoline = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12309) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12312) struct btf *bpf_get_btf_vmlinux(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12313) {
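/* lazily parse vmlinux BTF on first use: the unlocked check keeps the
 * common (already parsed) path cheap, while the second check under
 * bpf_verifier_lock serializes the one-time btf_parse_vmlinux() call
 */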
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12314) if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12315) mutex_lock(&bpf_verifier_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12316) if (!btf_vmlinux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12317) btf_vmlinux = btf_parse_vmlinux();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12318) mutex_unlock(&bpf_verifier_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12320) return btf_vmlinux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12323) int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12324) union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12326) u64 start_time = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12327) struct bpf_verifier_env *env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12328) struct bpf_verifier_log *log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12329) int i, len, ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12330) bool is_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12332) /* if no program types are defined, no program can be valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12333) if (ARRAY_SIZE(bpf_verifier_ops) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12334) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12336) /* 'struct bpf_verifier_env' could be a global, but since it is not small,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12337) * allocate/free it on every bpf_check() call instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12338) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12339) env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12340) if (!env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12341) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12342) log = &env->log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12344) len = (*prog)->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12345) env->insn_aux_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12346) vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12347) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12348) if (!env->insn_aux_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12349) goto err_free_env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12350) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12351) env->insn_aux_data[i].orig_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12352) env->prog = *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12353) env->ops = bpf_verifier_ops[env->prog->type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12354) is_priv = bpf_capable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12356) bpf_get_btf_vmlinux();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12358) /* grab the mutex to protect a few globals used by the verifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12359) if (!is_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12360) mutex_lock(&bpf_verifier_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12362) if (attr->log_level || attr->log_buf || attr->log_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12363) /* user requested verbose verifier output
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12364) * and supplied a buffer to store the verification trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12365) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12366) log->level = attr->log_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12367) log->ubuf = (char __user *) (unsigned long) attr->log_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12368) log->len_total = attr->log_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12370) /* log attributes have to be sane */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12371) if (!bpf_verifier_log_attr_valid(log)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12372) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12373) goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12375) }
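/* For reference, a loader supplies these attributes roughly as below
 * (user-space sketch using the libbpf API; buffer size, program name and
 * type are arbitrary):
 *
 *   static char log_buf[1 << 20];
 *   LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *               .log_buf = log_buf,
 *               .log_size = sizeof(log_buf),
 *               .log_level = 1);
 *   prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "prog", "GPL",
 *                           insns, insn_cnt, &opts);
 */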
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12377) if (IS_ERR(btf_vmlinux)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12378) /* Either gcc or pahole or the kernel is broken. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12379) verbose(env, "in-kernel BTF is malformed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12380) ret = PTR_ERR(btf_vmlinux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12381) goto skip_full_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12383)
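/* strict alignment defaults to the BPF_F_STRICT_ALIGNMENT flag, is forced
 * on architectures without efficient unaligned access, and is turned off
 * again by BPF_F_ANY_ALIGNMENT
 */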
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12384) env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12385) if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12386) env->strict_alignment = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12387) if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12388) env->strict_alignment = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12390) env->allow_ptr_leaks = bpf_allow_ptr_leaks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12391) env->allow_uninit_stack = bpf_allow_uninit_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12392) env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12393) env->bypass_spec_v1 = bpf_bypass_spec_v1();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12394) env->bypass_spec_v4 = bpf_bypass_spec_v4();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12395) env->bpf_capable = bpf_capable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12397) if (is_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12398) env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12399)
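/* table of already-explored verifier states, keyed by instruction index;
 * used for state pruning when an equivalent state is reached again
 */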
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12400) env->explored_states = kvcalloc(state_htab_size(env),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12401) sizeof(struct bpf_verifier_state_list *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12402) GFP_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12403) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12404) if (!env->explored_states)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12405) goto skip_full_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12407) ret = check_subprogs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12408) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12409) goto skip_full_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12411) ret = check_btf_info(env, attr, uattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12412) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12413) goto skip_full_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12415) ret = check_attach_btf_id(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12416) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12417) goto skip_full_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12419) ret = resolve_pseudo_ldimm64(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12420) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12421) goto skip_full_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12423) if (bpf_prog_is_dev_bound(env->prog->aux)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12424) ret = bpf_prog_offload_verifier_prep(env->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12425) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12426) goto skip_full_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12429) ret = check_cfg(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12430) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12431) goto skip_full_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12433) ret = do_check_subprogs(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12434) ret = ret ?: do_check_main(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12436) if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12437) ret = bpf_prog_offload_finalize(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12439) skip_full_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12440) kvfree(env->explored_states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12442) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12443) ret = check_max_stack_depth(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12445) /* instruction rewrites happen after this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12446) if (is_priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12447) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12448) opt_hard_wire_dead_code_branches(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12449) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12450) ret = opt_remove_dead_code(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12451) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12452) ret = opt_remove_nops(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12453) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12454) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12455) sanitize_dead_code(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12458) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12459) /* program is valid, convert *(u32*)(ctx + off) accesses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12460) ret = convert_ctx_accesses(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12462) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12463) ret = fixup_bpf_calls(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12465) /* do the 32-bit optimization after insn patching is done, so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12466) * patched insns are handled correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12468) if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12469) ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12470) env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12471) : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12474) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12475) ret = fixup_call_args(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12477) env->verification_time = ktime_get_ns() - start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12478) print_verification_stats(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12480) if (log->level && bpf_verifier_log_full(log))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12481) ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12482) if (log->level && !log->ubuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12483) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12484) goto err_release_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12487) if (ret == 0 && env->used_map_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12488) /* if program passed verifier, update used_maps in bpf_prog_info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12489) env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12490) sizeof(env->used_maps[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12491) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12493) if (!env->prog->aux->used_maps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12494) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12495) goto err_release_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12498) memcpy(env->prog->aux->used_maps, env->used_maps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12499) sizeof(env->used_maps[0]) * env->used_map_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12500) env->prog->aux->used_map_cnt = env->used_map_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12502) /* program is valid. Convert pseudo bpf_ld_imm64 into generic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12503) * bpf_ld_imm64 instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12505) convert_pseudo_ld_imm64(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12508) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12509) adjust_btf_func(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12511) err_release_maps:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12512) if (!env->prog->aux->used_maps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12513) /* if we didn't copy map pointers into bpf_prog_info, release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12514) * them now. Otherwise free_used_maps() will release them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12516) release_maps(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12518) /* extension progs temporarily inherit the attach_type of their targets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12519) * for verification purposes, so set it back to zero before returning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12521) if (env->prog->type == BPF_PROG_TYPE_EXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12522) env->prog->expected_attach_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12524) *prog = env->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12525) err_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12526) if (!is_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12527) mutex_unlock(&bpf_verifier_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12528) vfree(env->insn_aux_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12529) err_free_env:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12530) kfree(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12531) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12532) }