/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_EXTABLE_H
#define __ASM_EXTABLE_H

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means that when everything goes
 * well, we don't even have to jump over them. Further, they do not
 * intrude on our cache or TLB entries.
 */

struct exception_table_entry {
	int insn, fixup;	/* offsets relative to each field's own address */
};

#define ARCH_HAS_RELATIVE_EXTABLE
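
/*
 * Illustrative sketch, not part of the upstream header: with
 * ARCH_HAS_RELATIVE_EXTABLE each field stores an offset relative to its
 * own address, so the absolute addresses are recovered with pointer
 * arithmetic like the following. The helper names here are hypothetical;
 * the generic extable code in lib/extable.c and the arch fixup path do
 * the same computation.
 */
static inline unsigned long extable_insn_addr(const struct exception_table_entry *x)
{
	/* absolute address of the instruction that is allowed to fault */
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long extable_fixup_addr(const struct exception_table_entry *x)
{
	/* absolute address at which execution should continue */
	return (unsigned long)&x->fixup + x->fixup;
}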

/*
 * Return true if the exception was taken from code emitted by the BPF
 * JIT, i.e. regs->pc lies inside the BPF JIT region.
 */
static inline bool in_bpf_jit(struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_BPF_JIT))
		return false;

	return regs->pc >= BPF_JIT_REGION_START &&
	       regs->pc < BPF_JIT_REGION_END;
}

/*
 * Fix up an exception taken in BPF JITed code. Returns non-zero if the
 * fault was handled; the !CONFIG_BPF_JIT stub below reports that
 * nothing was fixed up.
 */
#ifdef CONFIG_BPF_JIT
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
			      struct pt_regs *regs);
#else /* !CONFIG_BPF_JIT */
static inline
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
			      struct pt_regs *regs)
{
	return 0;
}
#endif /* !CONFIG_BPF_JIT */

extern int fixup_exception(struct pt_regs *regs);
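
/*
 * A rough sketch, for illustration only, of how the arch fault handling
 * path is expected to consume the pieces declared above (modelled on the
 * arm64 fixup code of this era; not part of this header):
 *
 *	int fixup_exception(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *fixup;
 *
 *		fixup = search_exception_tables(instruction_pointer(regs));
 *		if (!fixup)
 *			return 0;
 *
 *		if (in_bpf_jit(regs))
 *			return arm64_bpf_fixup_exception(fixup, regs);
 *
 *		regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
 *		return 1;
 *	}
 */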
#endif /* __ASM_EXTABLE_H */