^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * bpf_jit64.h: BPF JIT compiler for PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #ifndef _BPF_JIT64_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #define _BPF_JIT64_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include "bpf_jit.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * with our redzone usage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * [ prev sp ] <-------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * [ nv gpr save area ] 5*8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * [ tail_call_cnt ] 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * [ local_tmp_var ] 16 |
 *		fp (r31) -->	[   ebpf stack space	] up to 512	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * [ frame header ] 32/112 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * sp (r1) ---> [ stack pointer ] --------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
/* for gpr non volatile registers BPF_REG_6 to 10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define BPF_PPC_STACK_SAVE (5*8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) /* for bpf JIT code internal usage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define BPF_PPC_STACK_LOCALS 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /* stack frame excluding BPF stack, ensure this is quadword aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #ifndef __ASSEMBLY__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /* BPF register usage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /* BPF to ppc register mappings */
static const int b2p[] = {
	/* function return value */
	[BPF_REG_0] = 8,
	/* function arguments (also the arguments to a jited program) */
	[BPF_REG_1] = 3,
	[BPF_REG_2] = 4,
	[BPF_REG_3] = 5,
	[BPF_REG_4] = 6,
	[BPF_REG_5] = 7,
	/*
	 * non volatile registers: callee-saved, so BPF_REG_6..9 survive
	 * calls to external helpers (saved/restored in the prologue and
	 * epilogue; see BPF_PPC_STACK_SAVE and BPF_PPC_NVR_MIN)
	 */
	[BPF_REG_6] = 27,
	[BPF_REG_7] = 28,
	[BPF_REG_8] = 29,
	[BPF_REG_9] = 30,
	/* frame pointer aka BPF_REG_10 */
	[BPF_REG_FP] = 31,
	/* eBPF jit internal registers */
	[BPF_REG_AX] = 2,
	[TMP_REG_1] = 9,
	/*
	 * NOTE: TMP_REG_2 (r10) is clobbered by PPC_BPF_LL/PPC_BPF_STL
	 * when the offset is not word aligned -- see the WARNING below
	 */
	[TMP_REG_2] = 10
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN		27

/*
 * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
 * so ensure that it isn't in use already.
 *
 * The DS-form ld/std instructions require the immediate displacement to
 * be a multiple of 4; for other offsets we materialize the offset into
 * TMP_REG_2 with li and use the indexed (X-form) ldx/stdx instead.
 */
/* emit a 64-bit load: r = *(u64 *)(base + i) */
#define PPC_BPF_LL(r, base, i) do {				      \
				if ((i) % 4) {			      \
					EMIT(PPC_RAW_LI(b2p[TMP_REG_2], (i)));\
					EMIT(PPC_RAW_LDX(r, base,	      \
							b2p[TMP_REG_2]));     \
				} else				      \
					EMIT(PPC_RAW_LD(r, base, i)); \
				} while(0)
/* emit a 64-bit store: *(u64 *)(base + i) = r */
#define PPC_BPF_STL(r, base, i) do {				      \
				if ((i) % 4) {			      \
					EMIT(PPC_RAW_LI(b2p[TMP_REG_2], (i)));\
					EMIT(PPC_RAW_STDX(r, base,	      \
							b2p[TMP_REG_2]));     \
				} else				      \
					EMIT(PPC_RAW_STD(r, base, i)); \
				} while(0)
/*
 * emit a 64-bit store with update (stdu): used for the prologue's stack
 * frame creation, so the offset is always frame-aligned -- no unaligned
 * fallback needed
 */
#define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STDU(r, base, i)); } while(0)

/* codegen_context::seen flags (bits 12-14; see struct comment below) */
#define SEEN_FUNC	0x1000 /* might call external helpers */
#define SEEN_STACK	0x2000 /* uses BPF stack */
#define SEEN_TAILCALL	0x4000 /* uses tail calls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
/* Per-program state carried across the JIT code-generation passes. */
struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r10 and r27-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 12 to 14
	 *   encoded in the SEEN_* macros above
	 *   (SEEN_FUNC=0x1000 .. SEEN_TAILCALL=0x4000)
	 */
	unsigned int seen;
	/*
	 * NOTE(review): appears to be the index of the next instruction
	 * slot in the JIT image (advanced by EMIT) -- confirm against
	 * bpf_jit.h / the code-gen pass
	 */
	unsigned int idx;
	/*
	 * NOTE(review): presumably the size of the eBPF stack space
	 * reserved below fp (the "upto 512" region in the layout above);
	 * verify in the prologue code
	 */
	unsigned int stack_size;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #endif /* !__ASSEMBLY__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #endif