Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Stack tracing support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2012 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/ftrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/kprobes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/sched/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/sched/task_stack.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/stacktrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/pointer_auth.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/stack_pointer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/stacktrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * AArch64 PCS assigns the frame pointer to x29.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * A simple function prologue looks like this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  * 	sub	sp, sp, #0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  *   	stp	x29, x30, [sp]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  *	mov	x29, sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * A simple function epilogue looks like this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  *	mov	sp, x29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  *	ldp	x29, x30, [sp]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  *	add	sp, sp, #0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  * Unwind from one frame record (A) to the next frame record (B).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * We terminate early if the location of B indicates a malformed chain of frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  * records (e.g. a cycle), determined based on the location and fp value of A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * and the location (but not the fp value) of B.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  */
/*
 * Returns 0 on success, or -EINVAL when the unwind must terminate: the frame
 * record is misaligned, inaccessible, would revisit a finished stack, fails
 * the same-stack monotonicity check, or is the terminal all-NULL record.
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long fp = frame->fp;
	struct stack_info info;

	/* Frame records are 16-byte aligned per the AArch64 PCS. */
	if (fp & 0xf)
		return -EINVAL;

	if (!tsk)
		tsk = current;

	/* The record must lie on a stack this task may legitimately use. */
	if (!on_accessible_stack(tsk, fp, &info))
		return -EINVAL;

	/* Never unwind back onto a stack we have already transitioned off. */
	if (test_bit(info.type, frame->stacks_done))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == frame->prev_type) {
		/* Same stack: catch cycles / downward-moving records. */
		if (fp <= frame->prev_fp)
			return -EINVAL;
	} else {
		/* Stack transition: mark the old stack as finished. */
		set_bit(frame->prev_type, frame->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_frame() invocation.
	 */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
	frame->prev_fp = fp;
	frame->prev_type = info.type;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Compare with the PAC stripped: the saved LR may carry a PAC. */
	if (tsk->ret_stack &&
		(ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
		struct ftrace_ret_stack *ret_stack;
		/*
		 * This is a case where function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it to an original value.
		 */
		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
		if (WARN_ON_ONCE(!ret_stack))
			return -EINVAL;
		frame->pc = ret_stack->ret;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	/* Strip any pointer-authentication code from the return address. */
	frame->pc = ptrauth_strip_insn_pac(frame->pc);

	/*
	 * Frames created upon entry from EL0 have NULL FP and PC values, so
	 * don't bother reporting these. Frames created by __noreturn functions
	 * might have a valid FP even if PC is bogus, so only terminate where
	 * both are NULL.
	 */
	if (!frame->fp && !frame->pc)
		return -EINVAL;

	return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 			     bool (*fn)(void *, unsigned long), void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 		if (!fn(data, frame->pc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 		ret = unwind_frame(tsk, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) NOKPROBE_SYMBOL(walk_stackframe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
/* Print a single symbolized backtrace entry at the requested log level. */
static void dump_backtrace_entry(unsigned long where, const char *loglvl)
{
	void *addr = (void *)where;

	printk("%s %pS\n", loglvl, addr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
/*
 * Print a "Call trace:" backtrace for @tsk at log level @loglvl. When @regs
 * is non-NULL, entries are skipped until the frame matching regs->regs[29]
 * is reached, and the faulting PC is printed from @regs at that point.
 */
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	struct stackframe frame;
	int skip = 0;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs) {
		/* We only unwind kernel-mode state; bail for user regs. */
		if (user_mode(regs))
			return;
		/* Skip entries until we hit the exception-taking frame. */
		skip = 1;
	}

	if (!tsk)
		tsk = current;

	/* Pin the task's stack so it cannot vanish while we walk it. */
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current) {
		/* Unwind starting from our own frame record. */
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)dump_backtrace);
	} else {
		/*
		 * task blocked in __switch_to
		 */
		start_backtrace(&frame,
				thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	}

	printk("%sCall trace:\n", loglvl);
	do {
		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(frame.pc, loglvl);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc, loglvl);
		}
	} while (!unwind_frame(tsk, &frame));

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(dump_backtrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	dump_backtrace(NULL, tsk, loglvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) #ifdef CONFIG_STACKTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
/*
 * arm64 backend for the generic stacktrace API: pick a starting FP/PC and
 * feed every unwound PC to @consume_entry with @cookie.
 */
noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct stackframe frame;

	if (regs)
		/* Trace from the interrupted context captured in @regs. */
		start_backtrace(&frame, regs->regs[29], regs->pc);
	else if (task == current)
		/*
		 * Trace from our caller's frame: level 1 of
		 * __builtin_frame_address() — noinline keeps this function's
		 * own frame distinct, so arch_stack_walk() itself is elided
		 * from the trace.
		 */
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(1),
				(unsigned long)__builtin_return_address(0));
	else
		/* Blocked task: use the FP/PC saved at its last __switch_to. */
		start_backtrace(&frame, thread_saved_fp(task),
				thread_saved_pc(task));

	walk_stackframe(task, &frame, consume_entry, cookie);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) #endif