Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  * Just-In-Time compiler for BPF filters on MIPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (c) 2014 Imagination Technologies Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Author: Markos Chandras <markos.chandras@imgtec.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * This program is free software; you can redistribute it and/or modify it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * under the terms of the GNU General Public License as published by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * Free Software Foundation; version 2 of the License.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/filter.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/if_vlan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/moduleloader.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <asm/asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <asm/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <asm/cpu-features.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <asm/uasm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include "bpf_jit.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) /* ABI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  * r_skb_hl	SKB header length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  * r_data	SKB data pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * r_off	Offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  * r_A		BPF register A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  * r_X		BPF register X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  * r_skb	*skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  * r_M		*scratch memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  * r_skb_len	SKB length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  * On entry (*bpf_func)(*skb, *filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  * a0 = MIPS_R_A0 = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  * a1 = MIPS_R_A1 = filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44)  * Stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  * M[15]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  * M[14]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48)  * M[13]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49)  * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50)  * M[0] <-- r_M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51)  * saved reg k-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52)  * saved reg k-2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53)  * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54)  * saved reg 0 <-- r_sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55)  * <no argument area>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57)  *                     Packet layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59)  * <--------------------- len ------------------------>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60)  * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61)  * ----------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62)  * |                  skb->data                       |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63)  * ----------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
/*
 * 'ptr' expands to the natural register-sized integer type, so the emit
 * helpers below can share one signature on 32-bit and 64-bit kernels.
 */
#define ptr typeof(unsigned long)

/* Stack offset of BPF scratch word M[k] (4 bytes per slot) */
#define SCRATCH_OFF(k)		(4 * (k))

/* JIT flags (kept in jit_ctx::flags); bits start above BPF_MEMWORDS */
#define SEEN_CALL		(1 << BPF_MEMWORDS)
#define SEEN_SREG_SFT		(BPF_MEMWORDS + 1)
#define SEEN_SREG_BASE		(1 << SEEN_SREG_SFT)
#define SEEN_SREG(x)		(SEEN_SREG_BASE << (x))
#define SEEN_OFF		SEEN_SREG(2)
#define SEEN_A			SEEN_SREG(3)
#define SEEN_X			SEEN_SREG(4)
#define SEEN_SKB		SEEN_SREG(5)
#define SEEN_MEM		SEEN_SREG(6)
/* SEEN_SKB_DATA also implies skb_hl and skb_len */
#define SEEN_SKB_DATA		(SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))

/* Arguments used by JIT */
#define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */

#define SBIT(x)			(1 << (x)) /* Signed version of BIT() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 
/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @prologue_bytes:	Number of bytes for prologue
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter; NULL during
 *			the first (sizing) pass, in which the emit helpers
 *			only advance @idx without writing instructions
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned int prologue_bytes;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) static inline int optimize_div(u32 *k)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	/* power of 2 divides can be implemented with right shift */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	if (!(*k & (*k-1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 		*k = ilog2(*k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
/*
 * Emit the instruction only if the JIT memory space has been allocated
 * (ctx->target != NULL); the instruction index is always advanced so
 * instruction counting works during the sizing pass as well.
 *
 * Hygiene fix: 'ctx' is now parenthesized in every expansion — the
 * original expanded a bare 'ctx->idx', which mis-binds for any argument
 * that is not a simple identifier.
 */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[(ctx)->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
/*
 * Similar to emit_instr but it must be used when we need to emit
 * 32-bit or 64-bit instructions (the UASM_i_* wrappers pick the
 * native-word variant).
 *
 * Hygiene fix: 'ctx' is now parenthesized in every expansion — the
 * original expanded a bare 'ctx->idx', which mis-binds for any argument
 * that is not a simple identifier.
 */
#define emit_long_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[(ctx)->idx];	\
		UASM_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) /* Determine if immediate is within the 16-bit signed range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) static inline bool is_range16(s32 imm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	return !(imm >= SBIT(15) || imm < -SBIT(15));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 
/* Emit: addu dst, src1, src2 (32-bit add, no overflow trap) */
static inline void emit_addu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, addu, dst, src1, src2);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 
/* Emit a MIPS nop */
static inline void emit_nop(struct jit_ctx *ctx)
{
	emit_instr(ctx, nop);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) /* Load a u32 immediate to a register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	if (ctx->target != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 		/* addiu can only handle s16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 		if (!is_range16(imm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 			u32 *p = &ctx->target[ctx->idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 			p = &ctx->target[ctx->idx + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 			uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 			u32 *p = &ctx->target[ctx->idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 			uasm_i_addiu(&p, dst, r_zero, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	ctx->idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	if (!is_range16(imm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 		ctx->idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
/* Emit: or dst, src1, src2 (bitwise OR of two registers) */
static inline void emit_or(unsigned int dst, unsigned int src1,
			   unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, or, dst, src1, src2);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 			    struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	if (imm >= BIT(16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 		emit_load_imm(r_tmp, imm, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 		emit_or(dst, src, r_tmp, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 		emit_instr(ctx, ori, dst, src, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
/* Emit: daddiu dst, src, imm (64-bit add-immediate) */
static inline void emit_daddiu(unsigned int dst, unsigned int src,
			       int imm, struct jit_ctx *ctx)
{
	/*
	 * Only used for stack, so the imm is relatively small
	 * and it fits in 15-bits
	 */
	emit_instr(ctx, daddiu, dst, src, imm);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) static inline void emit_addiu(unsigned int dst, unsigned int src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 			      u32 imm, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	if (!is_range16(imm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		emit_load_imm(r_tmp, imm, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 		emit_addu(dst, r_tmp, src, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 		emit_instr(ctx, addiu, dst, src, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 
/* Emit: and dst, src1, src2 (bitwise AND of two registers) */
static inline void emit_and(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, and, dst, src1, src2);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) static inline void emit_andi(unsigned int dst, unsigned int src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 			     u32 imm, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	/* If imm does not fit in u16 then load it to register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	if (imm >= BIT(16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 		emit_load_imm(r_tmp, imm, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 		emit_and(dst, src, r_tmp, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 		emit_instr(ctx, andi, dst, src, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
/* Emit: xor dst, src1, src2 (bitwise XOR of two registers) */
static inline void emit_xor(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, xor, dst, src1, src2);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	/* If imm does not fit in u16 then load it to register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	if (imm >= BIT(16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 		emit_load_imm(r_tmp, imm, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 		emit_xor(dst, src, r_tmp, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 		emit_instr(ctx, xori, dst, src, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
/* Adjust the stack pointer by offset (native-word ADDIU via UASM) */
static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
/* Emit: subu dst, src1, src2 (32-bit subtract, no overflow trap) */
static inline void emit_subu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, subu, dst, src1, src2);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 
/* reg = -reg, computed as ($zero - reg) */
static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
{
	emit_subu(reg, r_zero, reg, ctx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 
/* Emit: sllv dst, src, sa (shift left logical by register amount) */
static inline void emit_sllv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, sllv, dst, src, sa);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) static inline void emit_sll(unsigned int dst, unsigned int src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 			    unsigned int sa, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	/* sa is 5-bits long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	if (sa >= BIT(5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		/* Shifting >= 32 results in zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		emit_jit_reg_move(dst, r_zero, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 		emit_instr(ctx, sll, dst, src, sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 
/* Emit: srlv dst, src, sa (shift right logical by register amount) */
static inline void emit_srlv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, srlv, dst, src, sa);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) static inline void emit_srl(unsigned int dst, unsigned int src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 			    unsigned int sa, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	/* sa is 5-bits long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	if (sa >= BIT(5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 		/* Shifting >= 32 results in zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 		emit_jit_reg_move(dst, r_zero, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 		emit_instr(ctx, srl, dst, src, sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 
/* Emit: slt dst, src1, src2 (dst = (s32)src1 < (s32)src2) */
static inline void emit_slt(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, slt, dst, src1, src2);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
/* Emit: sltu dst, src1, src2 (dst = (u32)src1 < (u32)src2) */
static inline void emit_sltu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, sltu, dst, src1, src2);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) static inline void emit_sltiu(unsigned dst, unsigned int src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 			      unsigned int imm, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	/* 16 bit immediate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	if (!is_range16((s32)imm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 		emit_load_imm(r_tmp, imm, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 		emit_sltu(dst, src, r_tmp, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 		emit_instr(ctx, sltiu, dst, src, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 
/* Store register on the stack (native-word store via UASM SW/SD) */
static inline void emit_store_stack_reg(ptr reg, ptr base,
					unsigned int offset,
					struct jit_ctx *ctx)
{
	emit_long_instr(ctx, SW, reg, offset, base);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
/* Emit: sw reg, offset(base) (32-bit store) */
static inline void emit_store(ptr reg, ptr base, unsigned int offset,
			      struct jit_ctx *ctx)
{
	emit_instr(ctx, sw, reg, offset, base);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
/* Load register from the stack (native-word load via UASM LW/LD) */
static inline void emit_load_stack_reg(ptr reg, ptr base,
				       unsigned int offset,
				       struct jit_ctx *ctx)
{
	emit_long_instr(ctx, LW, reg, offset, base);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 
/* Emit: lw reg, offset(base) (32-bit load) */
static inline void emit_load(unsigned int reg, unsigned int base,
			     unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lw, reg, offset, base);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
/* Emit: lb reg, offset(base) (sign-extending byte load) */
static inline void emit_load_byte(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lb, reg, offset, base);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
/* Emit: lh reg, offset(base) (sign-extending halfword load) */
static inline void emit_half_load(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lh, reg, offset, base);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 
/* Emit: lhu reg, offset(base) (zero-extending halfword load) */
static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
					   unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lhu, reg, offset, base);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 
/* Emit: mul dst, src1, src2 (32-bit multiply, low word result) */
static inline void emit_mul(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, mul, dst, src1, src2);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) static inline void emit_div(unsigned int dst, unsigned int src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 			    struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	if (ctx->target != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 		u32 *p = &ctx->target[ctx->idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 		uasm_i_divu(&p, dst, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 		p = &ctx->target[ctx->idx + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 		uasm_i_mflo(&p, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	ctx->idx += 2; /* 2 insts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) static inline void emit_mod(unsigned int dst, unsigned int src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 			    struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	if (ctx->target != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 		u32 *p = &ctx->target[ctx->idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 		uasm_i_divu(&p, dst, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		p = &ctx->target[ctx->idx + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		uasm_i_mfhi(&p, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	ctx->idx += 2; /* 2 insts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 
/* Emit: dsll dst, src, sa (64-bit shift left logical, sa < 32) */
static inline void emit_dsll(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsll, dst, src, sa);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
/* Emit: dsrl32 dst, src, sa (64-bit shift right logical by sa + 32) */
static inline void emit_dsrl32(unsigned int dst, unsigned int src,
			       unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsrl32, dst, src, sa);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
/* Emit: wsbh dst, src (swap bytes within each halfword) */
static inline void emit_wsbh(unsigned int dst, unsigned int src,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, wsbh, dst, src);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
/* load pointer to register (native-word load via UASM LW/LD) */
static inline void emit_load_ptr(unsigned int dst, unsigned int src,
				     int imm, struct jit_ctx *ctx)
{
	/* src contains the base addr of the 32/64-pointer */
	emit_long_instr(ctx, LW, dst, imm, src);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 
/* load a function pointer to register */
static inline void emit_load_func(unsigned int reg, ptr imm,
				  struct jit_ctx *ctx)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * At this point imm is always 64-bit. Build the address in
		 * pieces: load the upper 32 bits, then twice shift left by
		 * 16 and OR in the next 16-bit chunk of the address.
		 * The emission order below is significant.
		 */
		emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
	} else {
		/* 32-bit: the pointer fits in one immediate load */
		emit_load_imm(reg, imm, ctx);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) /* Move to real MIPS register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	emit_long_instr(ctx, ADDU, dst, src, r_zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) /* Move to JIT (32-bit) register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	emit_addu(dst, src, r_zero, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) /* Compute the immediate value for PC-relative branches. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	if (ctx->target == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	 * We want a pc-relative branch. We only do forward branches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	 * so tgt is always after pc. tgt is the instruction offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	 * we want to jump to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	 * Branch on MIPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	 * I: target_offset <- sign_extend(offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	 * I+1: PC += target_offset (delay slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	 * ctx->idx currently points to the branch instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	 * but the offset is added to the delay slot so we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	 * to subtract 4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	return ctx->offsets[tgt] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		(ctx->idx * 4 - ctx->prologue_bytes) - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 			     unsigned int imm, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	if (ctx->target != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		u32 *p = &ctx->target[ctx->idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		switch (cond) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		case MIPS_COND_EQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 			uasm_i_beq(&p, reg1, reg2, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		case MIPS_COND_NE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 			uasm_i_bne(&p, reg1, reg2, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		case MIPS_COND_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 			uasm_i_b(&p, imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 			pr_warn("%s: Unhandled branch conditional: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 				__func__, cond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	ctx->idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) static inline void emit_jalr(unsigned int link, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 			     struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	emit_instr(ctx, jalr, link, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	emit_instr(ctx, jr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) static inline u16 align_sp(unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	/* Double word alignment for 32-bit, quadword for 64-bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	num = (num + (align - 1)) & -align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	return num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	int i = 0, real_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	u32 sflags, tmp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	/* Adjust the stack pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	if (offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		emit_stack_offset(-align_sp(offset), ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	/* sflags is essentially a bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	while (tmp_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		if ((sflags >> i) & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 			emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 					     ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 			real_off += SZREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		tmp_flags >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	/* save return address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	if (ctx->flags & SEEN_CALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		real_off += SZREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	/* Setup r_M leaving the alignment gap if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	if (ctx->flags & SEEN_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		if (real_off % (SZREG * 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 			real_off += SZREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) static void restore_bpf_jit_regs(struct jit_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 				 unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	int i, real_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	u32 sflags, tmp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	/* sflags is a bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	while (tmp_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		if ((sflags >> i) & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 			emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 					    ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 			real_off += SZREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		tmp_flags >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	/* restore return address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	if (ctx->flags & SEEN_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	/* Restore the sp and discard the scrach memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	if (offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		emit_stack_offset(align_sp(offset), ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) static unsigned int get_stack_depth(struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	int sp_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	/* How may s* regs do we need to preserved? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	if (ctx->flags & SEEN_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	if (ctx->flags & SEEN_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		sp_off += SZREG; /* Space for our ra register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	return sp_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) static void build_prologue(struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	int sp_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	/* Calculate the total offset for the stack pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	sp_off = get_stack_depth(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	save_bpf_jit_regs(ctx, sp_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	if (ctx->flags & SEEN_SKB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		emit_reg_move(r_skb, MIPS_R_A0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	if (ctx->flags & SEEN_SKB_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		/* Load packet length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 			  ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			  ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		/* Load the data pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		emit_load_ptr(r_skb_data, r_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 			      offsetof(struct sk_buff, data), ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		/* Load the header length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	if (ctx->flags & SEEN_X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		emit_jit_reg_move(r_X, r_zero, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	 * Do not leak kernel data to userspace, we only need to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	 * r_A if it is ever used.  In fact if it is never used, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	 * will not save/restore it, so clearing it in this case would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	 * corrupt the state of the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	    (ctx->flags & SEEN_A))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		emit_jit_reg_move(r_A, r_zero, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) static void build_epilogue(struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	unsigned int sp_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	/* Calculate the total offset for the stack pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	sp_off = get_stack_depth(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	restore_bpf_jit_regs(ctx, sp_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	/* Return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	emit_jr(r_ra, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) #define CHOOSE_LOAD_FUNC(K, func) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	 func##_positive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) static bool is_bad_offset(int b_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	return b_off > 0x1ffff || b_off < -0x20000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) static int build_body(struct jit_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	const struct bpf_prog *prog = ctx->skf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	const struct sock_filter *inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	unsigned int i, off, condt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	u32 k, b_off __maybe_unused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	u8 (*sk_load_func)(unsigned long *skb, int offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	for (i = 0; i < prog->len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		u16 code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		inst = &(prog->insns[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			 __func__, inst->code, inst->jt, inst->jf, inst->k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		k = inst->k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		code = bpf_anc_helper(inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		if (ctx->target == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 			ctx->offsets[i] = ctx->idx * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		switch (code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		case BPF_LD | BPF_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 			/* A <- k ==> li r_A, k */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 			emit_load_imm(r_A, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		case BPF_LD | BPF_W | BPF_LEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 			BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 			/* A <- len ==> lw r_A, offset(skb) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 			ctx->flags |= SEEN_SKB | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 			off = offsetof(struct sk_buff, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 			emit_load(r_A, r_skb, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		case BPF_LD | BPF_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			/* A <- M[k] ==> lw r_A, offset(M) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 			ctx->flags |= SEEN_MEM | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		case BPF_LD | BPF_W | BPF_ABS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 			/* A <- P[k:4] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 			goto load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		case BPF_LD | BPF_H | BPF_ABS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			/* A <- P[k:2] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			goto load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		case BPF_LD | BPF_B | BPF_ABS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 			/* A <- P[k:1] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) load:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			emit_load_imm(r_off, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) load_common:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 			ctx->flags |= SEEN_CALL | SEEN_OFF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 				SEEN_SKB | SEEN_A | SEEN_SKB_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			emit_load_func(r_s0, (ptr)sk_load_func, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 			emit_reg_move(MIPS_R_A0, r_skb, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 			emit_jalr(MIPS_R_RA, r_s0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 			/* Load second argument to delay slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 			emit_reg_move(MIPS_R_A1, r_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 			/* Check the error value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 			emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 				   ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 			/* Load return register on DS for failures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 			emit_reg_move(r_ret, r_zero, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 			/* Return with error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			b_off = b_imm(prog->len, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			if (is_bad_offset(b_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 				return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 			emit_b(b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		case BPF_LD | BPF_W | BPF_IND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 			/* A <- P[X + k:4] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 			sk_load_func = sk_load_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 			goto load_ind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		case BPF_LD | BPF_H | BPF_IND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 			/* A <- P[X + k:2] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 			sk_load_func = sk_load_half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 			goto load_ind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		case BPF_LD | BPF_B | BPF_IND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 			/* A <- P[X + k:1] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			sk_load_func = sk_load_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) load_ind:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 			ctx->flags |= SEEN_OFF | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 			emit_addiu(r_off, r_X, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			goto load_common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		case BPF_LDX | BPF_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 			/* X <- k */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 			ctx->flags |= SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			emit_load_imm(r_X, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		case BPF_LDX | BPF_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			/* X <- M[k] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			ctx->flags |= SEEN_X | SEEN_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		case BPF_LDX | BPF_W | BPF_LEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			/* X <- len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			ctx->flags |= SEEN_X | SEEN_SKB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			off = offsetof(struct sk_buff, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			emit_load(r_X, r_skb, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		case BPF_LDX | BPF_B | BPF_MSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			/* X <- 4 * (P[k:1] & 0xf) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			/* Load offset to a1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			 * This may emit two instructions so it may not fit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			 * in the delay slot. So use a0 in the delay slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			emit_load_imm(MIPS_R_A1, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			emit_jalr(MIPS_R_RA, r_s0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			/* Check the error value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			b_off = b_imm(prog->len, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 			if (is_bad_offset(b_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 				return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			emit_reg_move(r_ret, r_zero, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			/* We are good */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			/* X <- P[1:K] & 0xf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			emit_andi(r_X, r_A, 0xf, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			/* X << 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			emit_b(b_imm(i + 1, ctx), ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			emit_sll(r_X, r_X, 2, ctx); /* delay slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		case BPF_ST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			/* M[k] <- A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			ctx->flags |= SEEN_MEM | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		case BPF_STX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			/* M[k] <- X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			ctx->flags |= SEEN_MEM | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		case BPF_ALU | BPF_ADD | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			/* A += K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			emit_addiu(r_A, r_A, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		case BPF_ALU | BPF_ADD | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			/* A += X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			ctx->flags |= SEEN_A | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			emit_addu(r_A, r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		case BPF_ALU | BPF_SUB | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			/* A -= K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			emit_addiu(r_A, r_A, -k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		case BPF_ALU | BPF_SUB | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			/* A -= X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			ctx->flags |= SEEN_A | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			emit_subu(r_A, r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		case BPF_ALU | BPF_MUL | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 			/* A *= K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			/* Load K to scratch register before MUL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			emit_load_imm(r_s0, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			emit_mul(r_A, r_A, r_s0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		case BPF_ALU | BPF_MUL | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			/* A *= X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			ctx->flags |= SEEN_A | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			emit_mul(r_A, r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		case BPF_ALU | BPF_DIV | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 			/* A /= k */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 			if (k == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 			if (optimize_div(&k)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 				ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 				emit_srl(r_A, r_A, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			emit_load_imm(r_s0, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			emit_div(r_A, r_s0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		case BPF_ALU | BPF_MOD | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			/* A %= k */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			if (k == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 				ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 				emit_jit_reg_move(r_A, r_zero, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 				ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 				emit_load_imm(r_s0, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 				emit_mod(r_A, r_s0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		case BPF_ALU | BPF_DIV | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			/* A /= X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			ctx->flags |= SEEN_X | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			/* Check if r_X is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 			b_off = b_imm(prog->len, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			if (is_bad_offset(b_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 				return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			emit_load_imm(r_ret, 0, ctx); /* delay slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			emit_div(r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		case BPF_ALU | BPF_MOD | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			/* A %= X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			ctx->flags |= SEEN_X | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			/* Check if r_X is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			b_off = b_imm(prog->len, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			if (is_bad_offset(b_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 				return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			emit_load_imm(r_ret, 0, ctx); /* delay slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			emit_mod(r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		case BPF_ALU | BPF_OR | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			/* A |= K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			emit_ori(r_A, r_A, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		case BPF_ALU | BPF_OR | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			/* A |= X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			emit_ori(r_A, r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		case BPF_ALU | BPF_XOR | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			/* A ^= k */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			emit_xori(r_A, r_A, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		case BPF_ANC | SKF_AD_ALU_XOR_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		case BPF_ALU | BPF_XOR | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			/* A ^= X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			emit_xor(r_A, r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		case BPF_ALU | BPF_AND | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			/* A &= K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			emit_andi(r_A, r_A, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		case BPF_ALU | BPF_AND | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			/* A &= X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			ctx->flags |= SEEN_A | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			emit_and(r_A, r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		case BPF_ALU | BPF_LSH | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			/* A <<= K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			emit_sll(r_A, r_A, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		case BPF_ALU | BPF_LSH | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			/* A <<= X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			ctx->flags |= SEEN_A | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			emit_sllv(r_A, r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		case BPF_ALU | BPF_RSH | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			/* A >>= K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			emit_srl(r_A, r_A, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		case BPF_ALU | BPF_RSH | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			ctx->flags |= SEEN_A | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			emit_srlv(r_A, r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		case BPF_ALU | BPF_NEG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			/* A = -A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 			emit_neg(r_A, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		case BPF_JMP | BPF_JA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			/* pc += K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			b_off = b_imm(i + k + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			if (is_bad_offset(b_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 				return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			emit_b(b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		case BPF_JMP | BPF_JEQ | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 			/* pc += ( A == K ) ? pc->jt : pc->jf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			condt = MIPS_COND_EQ | MIPS_COND_K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			goto jmp_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		case BPF_JMP | BPF_JEQ | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			ctx->flags |= SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			/* pc += ( A == X ) ? pc->jt : pc->jf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			condt = MIPS_COND_EQ | MIPS_COND_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			goto jmp_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		case BPF_JMP | BPF_JGE | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 			/* pc += ( A >= K ) ? pc->jt : pc->jf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			condt = MIPS_COND_GE | MIPS_COND_K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			goto jmp_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		case BPF_JMP | BPF_JGE | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			ctx->flags |= SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			/* pc += ( A >= X ) ? pc->jt : pc->jf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			condt = MIPS_COND_GE | MIPS_COND_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			goto jmp_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		case BPF_JMP | BPF_JGT | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			/* pc += ( A > K ) ? pc->jt : pc->jf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			condt = MIPS_COND_GT | MIPS_COND_K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			goto jmp_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		case BPF_JMP | BPF_JGT | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			ctx->flags |= SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			/* pc += ( A > X ) ? pc->jt : pc->jf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			condt = MIPS_COND_GT | MIPS_COND_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) jmp_cmp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			/* Greater or Equal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			if ((condt & MIPS_COND_GE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			    (condt & MIPS_COND_GT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 				if (condt & MIPS_COND_K) { /* K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 					ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 					emit_sltiu(r_s0, r_A, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 				} else { /* X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 					ctx->flags |= SEEN_A |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 						SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 					emit_sltu(r_s0, r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 				/* A < (K|X) ? r_scrach = 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 				b_off = b_imm(i + inst->jf + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 				emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 					   ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 				emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 				/* A > (K|X) ? scratch = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 				if (condt & MIPS_COND_GT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 					/* Checking for equality */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 					ctx->flags |= SEEN_A | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 					if (condt & MIPS_COND_K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 						emit_load_imm(r_s0, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 					else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 						emit_jit_reg_move(r_s0, r_X,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 								  ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 					b_off = b_imm(i + inst->jf + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 						   b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 					emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 					/* Finally, A > K|X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 					b_off = b_imm(i + inst->jt + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 					emit_b(b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 					emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 					/* A >= (K|X) so jump */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 					b_off = b_imm(i + inst->jt + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 					emit_b(b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 					emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 				/* A == K|X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 				if (condt & MIPS_COND_K) { /* K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 					ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 					emit_load_imm(r_s0, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 					/* jump true */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 					b_off = b_imm(i + inst->jt + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 						   b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 					emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 					/* jump false */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 					b_off = b_imm(i + inst->jf + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 						      ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 					emit_bcond(MIPS_COND_NE, r_A, r_s0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 						   b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 					emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 				} else { /* X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 					/* jump true */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 					ctx->flags |= SEEN_A | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 					b_off = b_imm(i + inst->jt + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 						      ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 					emit_bcond(MIPS_COND_EQ, r_A, r_X,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 						   b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 					emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 					/* jump false */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 					b_off = b_imm(i + inst->jf + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 					emit_bcond(MIPS_COND_NE, r_A, r_X,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 						   b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 					emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		case BPF_JMP | BPF_JSET | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			/* pc += (A & K) ? pc -> jt : pc -> jf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			emit_load_imm(r_s1, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			emit_and(r_s0, r_A, r_s1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			/* jump true */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			b_off = b_imm(i + inst->jt + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			/* jump false */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			b_off = b_imm(i + inst->jf + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			emit_b(b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		case BPF_JMP | BPF_JSET | BPF_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			ctx->flags |= SEEN_X | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			/* pc += (A & X) ? pc -> jt : pc -> jf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			emit_and(r_s0, r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			/* jump true */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			b_off = b_imm(i + inst->jt + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			/* jump false */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			b_off = b_imm(i + inst->jf + 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			emit_b(b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		case BPF_RET | BPF_A:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			ctx->flags |= SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			if (i != prog->len - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 				 * If this is not the last instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 				 * then jump to the epilogue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 				b_off = b_imm(prog->len, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 				if (is_bad_offset(b_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 					return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 				emit_b(b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		case BPF_RET | BPF_K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			 * It can emit two instructions so it does not fit on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			 * the delay slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			emit_load_imm(r_ret, k, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			if (i != prog->len - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 				 * If this is not the last instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 				 * then jump to the epilogue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				b_off = b_imm(prog->len, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 				if (is_bad_offset(b_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 					return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 				emit_b(b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 				emit_nop(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		case BPF_MISC | BPF_TAX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			/* X = A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			ctx->flags |= SEEN_X | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			emit_jit_reg_move(r_X, r_A, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		case BPF_MISC | BPF_TXA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			/* A = X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			ctx->flags |= SEEN_A | SEEN_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			emit_jit_reg_move(r_A, r_X, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		/* AUX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		case BPF_ANC | SKF_AD_PROTOCOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			/* A = ntohs(skb->protocol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			BUILD_BUG_ON(sizeof_field(struct sk_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 						  protocol) != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			off = offsetof(struct sk_buff, protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 			emit_half_load(r_A, r_skb, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) #ifdef CONFIG_CPU_LITTLE_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 			/* This needs little endian fixup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			if (cpu_has_wsbh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 				/* R2 and later have the wsbh instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 				emit_wsbh(r_A, r_A, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 				/* Get first byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 				emit_andi(r_tmp_imm, r_A, 0xff, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 				/* Shift it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				emit_sll(r_tmp, r_tmp_imm, 8, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 				/* Get second byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				emit_srl(r_tmp_imm, r_A, 8, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 				emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 				/* Put everyting together in r_A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				emit_or(r_A, r_tmp, r_tmp_imm, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		case BPF_ANC | SKF_AD_CPU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 			ctx->flags |= SEEN_A | SEEN_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			/* A = current_thread_info()->cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			BUILD_BUG_ON(sizeof_field(struct thread_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 						  cpu) != 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			off = offsetof(struct thread_info, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			/* $28/gp points to the thread_info struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			emit_load(r_A, 28, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		case BPF_ANC | SKF_AD_IFINDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 			/* A = skb->dev->ifindex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		case BPF_ANC | SKF_AD_HATYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 			/* A = skb->dev->type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			ctx->flags |= SEEN_SKB | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			off = offsetof(struct sk_buff, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			/* Load *dev pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			emit_load_ptr(r_s0, r_skb, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 			/* error (0) in the delay slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			b_off = b_imm(prog->len, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			if (is_bad_offset(b_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 				return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			emit_reg_move(r_ret, r_zero, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 				BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 				off = offsetof(struct net_device, ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				emit_load(r_A, r_s0, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			} else { /* (code == (BPF_ANC | SKF_AD_HATYPE) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 				BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 				off = offsetof(struct net_device, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 				emit_half_load_unsigned(r_A, r_s0, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		case BPF_ANC | SKF_AD_MARK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			ctx->flags |= SEEN_SKB | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 			BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			off = offsetof(struct sk_buff, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			emit_load(r_A, r_skb, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		case BPF_ANC | SKF_AD_RXHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			ctx->flags |= SEEN_SKB | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			off = offsetof(struct sk_buff, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 			emit_load(r_A, r_skb, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		case BPF_ANC | SKF_AD_VLAN_TAG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			ctx->flags |= SEEN_SKB | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			BUILD_BUG_ON(sizeof_field(struct sk_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 						  vlan_tci) != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 			off = offsetof(struct sk_buff, vlan_tci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 			emit_half_load_unsigned(r_A, r_skb, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			ctx->flags |= SEEN_SKB | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			if (PKT_VLAN_PRESENT_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 				emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			if (PKT_VLAN_PRESENT_BIT < 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 				emit_andi(r_A, r_A, 1, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		case BPF_ANC | SKF_AD_PKTTYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			ctx->flags |= SEEN_SKB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 			/* Keep only the last 3 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) #ifdef __BIG_ENDIAN_BITFIELD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 			/* Get the actual packet type to the lower 3 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 			emit_srl(r_A, r_A, 5, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		case BPF_ANC | SKF_AD_QUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			ctx->flags |= SEEN_SKB | SEEN_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			BUILD_BUG_ON(sizeof_field(struct sk_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 						  queue_mapping) != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			BUILD_BUG_ON(offsetof(struct sk_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 					      queue_mapping) > 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			off = offsetof(struct sk_buff, queue_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			emit_half_load_unsigned(r_A, r_skb, off, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 				 inst->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	/* compute offsets only during the first pass */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (ctx->target == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		ctx->offsets[i] = ctx->idx * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) void bpf_jit_compile(struct bpf_prog *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	struct jit_ctx ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	unsigned int alloc_size, tmp_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	if (!bpf_jit_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	memset(&ctx, 0, sizeof(ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (ctx.offsets == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	ctx.skf = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	if (build_body(&ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	tmp_idx = ctx.idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	build_prologue(&ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	/* just to complete the ctx.idx count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	build_epilogue(&ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	alloc_size = 4 * ctx.idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	ctx.target = module_alloc(alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	if (ctx.target == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	/* Clean it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	memset(ctx.target, 0, alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	ctx.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	/* Generate the actual JIT code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	build_prologue(&ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	if (build_body(&ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		module_memfree(ctx.target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	build_epilogue(&ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	/* Update the icache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	if (bpf_jit_enable > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		/* Dump JIT code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	fp->bpf_func = (void *)ctx.target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	fp->jited = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	kfree(ctx.offsets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) void bpf_jit_free(struct bpf_prog *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	if (fp->jited)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		module_memfree(fp->bpf_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	bpf_prog_unlock_free(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }