/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__

/*
 * Note that BPF programs need to include either vmlinux.h (auto-generated
 * from BTF) or linux/types.h in advance, since bpf_helper_defs.h uses
 * types such as __u64.
 */
#include "bpf_helper_defs.h"
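
/*
 * For example (illustrative only, assuming the header is installed under
 * the usual bpf/ include directory), a consuming BPF program would start
 * with:
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 */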

#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]
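
/*
 * Usage sketch (map name and attributes are illustrative): these macros
 * encode map attributes as BTF type information for a BTF-defined map,
 * e.g.:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} my_map SEC(".maps");
 */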

/* Helper macro to print out debug messages */
#define bpf_printk(fmt, ...)				\
({							\
	char ____fmt[] = fmt;				\
	bpf_trace_printk(____fmt, sizeof(____fmt),	\
			 ##__VA_ARGS__);		\
})
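
/*
 * Example (illustrative): the format string must be a string literal; the
 * output can be read from the kernel trace buffer, e.g. via
 * /sys/kernel/debug/tracing/trace_pipe:
 *
 *	bpf_printk("hello from pid %d\n", pid);
 */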

/*
 * Helper macro to place programs, maps, and the license string in separate
 * sections of the BPF ELF file. Section names are interpreted by the
 * libbpf loader.
 */
#define SEC(NAME) __attribute__((section(NAME), used))
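
/*
 * Example (section and program names are illustrative):
 *
 *	char LICENSE[] SEC("license") = "GPL";
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int handle_execve(void *ctx)
 *	{
 *		return 0;
 *	}
 */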

#ifndef __always_inline
#define __always_inline __attribute__((always_inline))
#endif
#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif
#ifndef __weak
#define __weak __attribute__((weak))
#endif

/*
 * Helper macros to manipulate data structures
 */
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
#endif
#ifndef container_of
#define container_of(ptr, type, member)			\
({							\
	void *__mptr = (void *)(ptr);			\
	((type *)(__mptr - offsetof(type, member)));	\
})
#endif
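
/*
 * Example (hypothetical struct): given
 *
 *	struct item { int key; __u64 val; };
 *
 * offsetof(struct item, val) is the byte offset of 'val' within the struct,
 * and, for a pointer 'vp' to the 'val' member of some instance,
 * container_of(vp, struct item, val) recovers the enclosing 'struct item *'.
 */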

/*
 * Helper macro to throw a compilation error if __bpf_unreachable() gets
 * built into the resulting code. This works because the BPF back end does
 * not implement __builtin_trap(). It is useful to assert that certain
 * paths of the program code are never used and hence eliminated by the
 * compiler.
 *
 * For example, consider a switch statement that covers known cases used by
 * the program. __bpf_unreachable() can then reside in the default case. If
 * the program gets extended such that a case is not covered in the switch
 * statement, the build will fail because the default case is no longer
 * compiled out.
 */
#ifndef __bpf_unreachable
# define __bpf_unreachable() __builtin_trap()
#endif
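
/*
 * Sketch of the pattern described above (do_a()/do_b() are hypothetical
 * helpers; 'which' must be a compile-time constant so that the compiler can
 * fold the switch and eliminate the default case):
 *
 *	static __always_inline int pick(const int which)
 *	{
 *		switch (which) {
 *		case 0:
 *			return do_a();
 *		case 1:
 *			return do_b();
 *		default:
 *			__bpf_unreachable();
 *		}
 *	}
 */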

/*
 * Helper function to perform a tail call with a constant/immediate map slot.
 */
#if __clang_major__ >= 8 && defined(__bpf__)
static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
	if (!__builtin_constant_p(slot))
		__bpf_unreachable();

	/*
	 * Provide a hard guarantee that LLVM won't optimize setting r2 (map
	 * pointer) and r3 (constant map index) from _different paths_ ending
	 * up at the _same_ call insn, as otherwise we won't be able to use
	 * the jmpq/nopl retpoline-free patching done by the x86-64 JIT in
	 * the kernel when the two mismatch. See also d2e4c1e6c294 ("bpf:
	 * Constant map key tracking for prog array pokes") for details on
	 * verifier tracking.
	 *
	 * Note on clobber list: we need to stay in line with the BPF calling
	 * convention, so even if we don't end up using r0, r4, r5, we need
	 * to mark them as clobbered so that LLVM doesn't end up using them
	 * before / after the call.
	 */
	asm volatile("r1 = %[ctx]\n\t"
		     "r2 = %[map]\n\t"
		     "r3 = %[slot]\n\t"
		     "call 12"
		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
		     : "r0", "r1", "r2", "r3", "r4", "r5");
}
#endif
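
/*
 * Example (illustrative): 'jmp_table' is a hypothetical
 * BPF_MAP_TYPE_PROG_ARRAY map and SLOT_PARSER a compile-time constant index
 * into it. The call only returns if the tail call fails:
 *
 *	bpf_tail_call_static(ctx, &jmp_table, SLOT_PARSER);
 */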

/*
 * Helper structure used by eBPF C programs
 * to describe BPF map attributes to the libbpf loader
 */
struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};
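
/*
 * Example of the legacy map definition style using this struct (map name is
 * illustrative; BTF-defined maps via __uint()/__type() in SEC(".maps") are
 * the more common modern alternative):
 *
 *	struct bpf_map_def SEC("maps") counters = {
 *		.type = BPF_MAP_TYPE_ARRAY,
 *		.key_size = sizeof(__u32),
 *		.value_size = sizeof(__u64),
 *		.max_entries = 64,
 *	};
 */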

enum libbpf_pin_type {
	LIBBPF_PIN_NONE,
	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
	LIBBPF_PIN_BY_NAME,
};
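
/*
 * Example (illustrative map): request pinning of a BTF-defined map by name,
 * so that libbpf pins it under /sys/fs/bpf/<map name> by default and reuses
 * an existing pinned map on subsequent loads:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 16);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *		__uint(pinning, LIBBPF_PIN_BY_NAME);
 *	} pinned_map SEC(".maps");
 */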

enum libbpf_tristate {
	TRI_NO = 0,
	TRI_YES = 1,
	TRI_MODULE = 2,
};

#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
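
/*
 * Usage sketch (CONFIG_EXAMPLE_MOD and example_kernel_sym are hypothetical
 * names): __kconfig externs are resolved by libbpf from the running
 * kernel's configuration (tristate options map to enum libbpf_tristate
 * above), and __ksym externs are resolved against kernel symbols; marking a
 * __kconfig extern __weak makes it optional:
 *
 *	extern int LINUX_KERNEL_VERSION __kconfig;
 *	extern enum libbpf_tristate CONFIG_EXAMPLE_MOD __kconfig __weak;
 *	extern const void example_kernel_sym __ksym;
 */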

#endif