// SPDX-License-Identifier: GPL-2.0
/*
 * misc.c
 *
 * This is a collection of several routines used to extract the kernel,
 * which includes KASLR relocation, decompression, ELF parsing, and
 * relocation processing. Additionally included are the screen and serial
 * output functions and related debugging support functions.
 *
 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
 * puts by Nick Holloway 1993, better puts by Martin Mares 1995
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */

#include "misc.h"
#include "error.h"
#include "pgtable.h"
#include "../string.h"
#include "../voffset.h"
#include <asm/bootparam_utils.h>

/*
 * WARNING!!
 * This code is compiled with -fPIC and it is relocated dynamically at
 * run time, but no relocation processing is performed. This means that
 * it is not safe to place pointers in static structures.
 */
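/*
 * Illustration only: a file-scope initializer such as
 *
 *	static const char *msg = "hello";
 *
 * would embed the string's link-time address, which is never fixed up
 * here, so msg would point at the wrong location once the code has been
 * moved. Addresses must instead be taken at run time, from code.
 */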

/* Macros used by the included decompressor code below. */
#define STATIC static

/*
 * Provide definitions of memzero and memmove as some of the decompressors will
 * try to define their own functions if these are not defined as macros.
 */
#define memzero(s, n)	memset((s), 0, (n))
#define memmove		memmove

/* Functions used by the included decompressor code below. */
void *memmove(void *dest, const void *src, size_t n);
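/*
 * Illustration only (the exact guards vary between decompressors): with
 * memzero defined as a macro above, a decompressor's private fallback of
 * roughly this shape is skipped:
 *
 *	#ifndef memzero
 *	#define memzero(buf, size)	memset(buf, 0, size)
 *	#endif
 */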

/*
 * This is set up by the setup-routine at boot-time
 */
struct boot_params *boot_params;

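/*
 * Bounds of the scratch heap used by the decompressors' simple bump
 * allocator (malloc() in include/linux/decompress/mm.h); initialized
 * from the boot heap in extract_kernel() below.
 */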
memptr free_mem_ptr;
memptr free_mem_end_ptr;

static char *vidmem;
static int vidport;
static int lines, cols;

#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif

#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif

#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif

#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif

#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif

#ifdef CONFIG_KERNEL_LZ4
#include "../../../../lib/decompress_unlz4.c"
#endif

#ifdef CONFIG_KERNEL_ZSTD
#include "../../../../lib/decompress_unzstd.c"
#endif
/*
 * NOTE: When adding a new decompressor, please update the analysis in
 * ../header.S.
 */

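/* Scroll the VGA text screen up by one line and blank the bottom row. */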
static void scroll(void)
{
	int i;

	memmove(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
	for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
		vidmem[i] = ' ';
}

#define XMTRDY		0x20

#define TXR		0	/* Transmit register (WRITE) */
#define LSR		5	/* Line Status */
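/*
 * Poll the UART Line Status Register until the transmitter is ready
 * (XMTRDY set) or the timeout expires, then write the character. The
 * timeout keeps boot from wedging if no working UART is present.
 */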
static void serial_putchar(int ch)
{
	unsigned timeout = 0xffff;

	while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
		cpu_relax();

	outb(ch, early_serial_base + TXR);
}

void __putstr(const char *s)
{
	int x, y, pos;
	char c;

	if (early_serial_base) {
		const char *str = s;
		while (*str) {
			if (*str == '\n')
				serial_putchar('\r');
			serial_putchar(*str++);
		}
	}

	if (lines == 0 || cols == 0)
		return;

	x = boot_params->screen_info.orig_x;
	y = boot_params->screen_info.orig_y;

	while ((c = *s++) != '\0') {
		if (c == '\n') {
			x = 0;
			if (++y >= lines) {
				scroll();
				y--;
			}
		} else {
			vidmem[(x + cols * y) * 2] = c;
			if (++x >= cols) {
				x = 0;
				if (++y >= lines) {
					scroll();
					y--;
				}
			}
		}
	}

	boot_params->screen_info.orig_x = x;
	boot_params->screen_info.orig_y = y;

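	/*
	 * Program the VGA CRTC cursor location registers (index 14 is the
	 * high byte, 15 the low byte). The hardware wants a character-cell
	 * offset, while pos below is a byte offset (two bytes per cell),
	 * hence the extra shift by one.
	 */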
	pos = (x + cols * y) * 2;	/* Update cursor position */
	outb(14, vidport);
	outb(0xff & (pos >> 9), vidport+1);
	outb(15, vidport);
	outb(0xff & (pos >> 1), vidport+1);
}

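/*
 * Print an unsigned long as fixed-width, lowercase hex, most significant
 * nibble first: on a 64-bit build, __puthex(0x1a) emits
 * "000000000000001a".
 */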
void __puthex(unsigned long value)
{
	char alpha[2] = "0";
	int bits;

	for (bits = sizeof(value) * 8 - 4; bits >= 0; bits -= 4) {
		unsigned long digit = (value >> bits) & 0xf;

		if (digit < 0xA)
			alpha[0] = '0' + digit;
		else
			alpha[0] = 'a' + (digit - 0xA);

		__putstr(alpha);
	}
}

#if CONFIG_X86_NEED_RELOCS
static void handle_relocations(void *output, unsigned long output_len,
			       unsigned long virt_addr)
{
	int *reloc;
	unsigned long delta, map, ptr;
	unsigned long min_addr = (unsigned long)output;
	unsigned long max_addr = min_addr + (VO___bss_start - VO__text);

	/*
	 * Calculate the delta between where vmlinux was linked to load
	 * and where it was actually loaded.
	 */
	delta = min_addr - LOAD_PHYSICAL_ADDR;

	/*
	 * The kernel contains a table of relocation addresses. Those
	 * addresses have the final load address of the kernel in virtual
	 * memory. We are currently working in the self map. So we need to
	 * create an adjustment for kernel memory addresses to the self map.
	 * This will involve subtracting out the base address of the kernel.
	 */
	map = delta - __START_KERNEL_map;

	/*
	 * 32-bit always performs relocations. 64-bit relocations are only
	 * needed if KASLR has chosen a different starting address offset
	 * from __START_KERNEL_map.
	 */
	if (IS_ENABLED(CONFIG_X86_64))
		delta = virt_addr - LOAD_PHYSICAL_ADDR;

	if (!delta) {
		debug_putstr("No relocation needed... ");
		return;
	}
	debug_putstr("Performing relocations... ");

	/*
	 * Process relocations: 32-bit relocations first, then 64-bit after.
	 * Three sets of binary relocations are added to the end of the kernel
	 * before compression. Each relocation table entry is the kernel
	 * address of the location which needs to be updated, stored as a
	 * 32-bit value that is sign-extended to 64 bits.
	 *
	 * Format is:
	 *
	 * kernel bits...
	 * 0 - zero terminator for 64 bit relocations
	 * 64 bit relocation repeated
	 * 0 - zero terminator for inverse 32 bit relocations
	 * 32 bit inverse relocation repeated
	 * 0 - zero terminator for 32 bit relocations
	 * 32 bit relocation repeated
	 *
	 * So we work backwards from the end of the decompressed image.
	 */
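	/*
	 * Worked example (addresses made up for illustration): if the
	 * kernel was linked to run at LOAD_PHYSICAL_ADDR 0x1000000 but
	 * decompressed to output 0x5000000, then adding map to a stored
	 * entry such as __START_KERNEL_map + 0x1234568 yields 0x5234568,
	 * the location of that word inside the copied image; delta is then
	 * added to (or, for inverse relocations, subtracted from) the
	 * value stored there.
	 */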
	for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) {
		long extended = *reloc;
		extended += map;

		ptr = (unsigned long)extended;
		if (ptr < min_addr || ptr > max_addr)
			error("32-bit relocation outside of kernel!\n");

		*(uint32_t *)ptr += delta;
	}
#ifdef CONFIG_X86_64
	while (*--reloc) {
		long extended = *reloc;
		extended += map;

		ptr = (unsigned long)extended;
		if (ptr < min_addr || ptr > max_addr)
			error("inverse 32-bit relocation outside of kernel!\n");

		*(int32_t *)ptr -= delta;
	}
	for (reloc--; *reloc; reloc--) {
		long extended = *reloc;
		extended += map;

		ptr = (unsigned long)extended;
		if (ptr < min_addr || ptr > max_addr)
			error("64-bit relocation outside of kernel!\n");

		*(uint64_t *)ptr += delta;
	}
#endif
}
#else
static inline void handle_relocations(void *output, unsigned long output_len,
				      unsigned long virt_addr)
{ }
#endif

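/*
 * The decompressed image is a vmlinux ELF file: walk its program headers
 * and copy each PT_LOAD segment from its file offset to its destination,
 * shifted along with the (possibly KASLR-chosen) output address when the
 * kernel is relocatable.
 */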
static void parse_elf(void *output)
{
#ifdef CONFIG_X86_64
	Elf64_Ehdr ehdr;
	Elf64_Phdr *phdrs, *phdr;
#else
	Elf32_Ehdr ehdr;
	Elf32_Phdr *phdrs, *phdr;
#endif
	void *dest;
	int i;

	memcpy(&ehdr, output, sizeof(ehdr));
	if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
	    ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
	    ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
	    ehdr.e_ident[EI_MAG3] != ELFMAG3) {
		error("Kernel is not a valid ELF file");
		return;
	}

	debug_putstr("Parsing ELF... ");

	phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
	if (!phdrs)
		error("Failed to allocate space for phdrs");

	memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);

	for (i = 0; i < ehdr.e_phnum; i++) {
		phdr = &phdrs[i];

		switch (phdr->p_type) {
		case PT_LOAD:
#ifdef CONFIG_X86_64
			if ((phdr->p_align % 0x200000) != 0)
				error("Alignment of LOAD segment isn't multiple of 2MB");
#endif
#ifdef CONFIG_RELOCATABLE
			dest = output;
			dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
#else
			dest = (void *)(phdr->p_paddr);
#endif
			memmove(dest, output + phdr->p_offset, phdr->p_filesz);
			break;
		default: /* Ignore other PT_* */ break;
		}
	}

	free(phdrs);
}

/*
 * The compressed kernel image (ZO) has been moved so that its position
 * is against the end of the buffer used to hold the uncompressed kernel
 * image (VO) and the execution environment (.bss, .brk), which makes sure
 * there is room to do the in-place decompression. (See header.S for the
 * calculations.)
 *
 *                             |-----compressed kernel image------|
 *                             V                                  V
 * 0                       extract_offset                    +INIT_SIZE
 * |-----------|---------------|-------------------------|--------|
 *             |               |                          |
 *           VO__text      startup_32 of ZO            VO__end   ZO__end
 *             ^                                          ^
 *             |-------uncompressed kernel image---------|
 *
 */
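/*
 * Informal note: because the compressed data sits at the high end of this
 * buffer and decompression writes from the low end, the output never
 * overtakes not-yet-consumed input as long as INIT_SIZE includes the
 * worst-case expansion slack that header.S accounts for.
 */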
asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
				  unsigned char *input_data,
				  unsigned long input_len,
				  unsigned char *output,
				  unsigned long output_len)
{
	const unsigned long kernel_total_size = VO__end - VO__text;
	unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
	unsigned long needed_size;

	/* Retain x86 boot parameters pointer passed from startup_32/64. */
	boot_params = rmode;

	/* Clear flags intended solely for in-kernel use. */
	boot_params->hdr.loadflags &= ~KASLR_FLAG;

	sanitize_boot_params(boot_params);

	if (boot_params->screen_info.orig_video_mode == 7) {
		vidmem = (char *) 0xb0000;
		vidport = 0x3b4;
	} else {
		vidmem = (char *) 0xb8000;
		vidport = 0x3d4;
	}

	lines = boot_params->screen_info.orig_video_lines;
	cols = boot_params->screen_info.orig_video_cols;

	console_init();

	/*
	 * Save RSDP address for later use. Have this after console_init()
	 * so that early debugging output from the RSDP parsing code can be
	 * collected.
	 */
	boot_params->acpi_rsdp_addr = get_rsdp_addr();

	debug_putstr("early console in extract_kernel\n");

	free_mem_ptr     = heap;	/* Heap */
	free_mem_end_ptr = heap + BOOT_HEAP_SIZE;

	/*
	 * The memory hole needed for the kernel is the larger of either
	 * the entire decompressed kernel plus relocation table, or the
	 * entire decompressed kernel plus .bss and .brk sections.
	 *
	 * On X86_64, the memory is mapped with PMD pages. Round the
	 * size up so that the full extent of PMD pages mapped is
	 * included in the check against the valid memory table
	 * entries. This ensures the full mapped area is usable RAM
	 * and doesn't include any reserved areas.
	 */
	needed_size = max(output_len, kernel_total_size);
#ifdef CONFIG_X86_64
	needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
#endif
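	/*
	 * For illustration only (sizes made up): with output_len of
	 * 0x1400000 and kernel_total_size of 0x1f00000, needed_size is
	 * 0x1f00000, then rounded up to the next MIN_KERNEL_ALIGN (PMD
	 * sized, 2 MiB) boundary on x86_64, giving 0x2000000.
	 */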

	/* Report initial kernel position details. */
	debug_putaddr(input_data);
	debug_putaddr(input_len);
	debug_putaddr(output);
	debug_putaddr(output_len);
	debug_putaddr(kernel_total_size);
	debug_putaddr(needed_size);

#ifdef CONFIG_X86_64
	/* Report address of 32-bit trampoline */
	debug_putaddr(trampoline_32bit);
#endif

	choose_random_location((unsigned long)input_data, input_len,
				(unsigned long *)&output,
				needed_size,
				&virt_addr);

	/* Validate memory location choices. */
	if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
		error("Destination physical address inappropriately aligned");
	if (virt_addr & (MIN_KERNEL_ALIGN - 1))
		error("Destination virtual address inappropriately aligned");
#ifdef CONFIG_X86_64
	if (heap > 0x3fffffffffffUL)
		error("Destination address too large");
	if (virt_addr + max(output_len, kernel_total_size) > KERNEL_IMAGE_SIZE)
		error("Destination virtual address is beyond the kernel mapping area");
#else
	if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
		error("Destination address too large");
#endif
#ifndef CONFIG_RELOCATABLE
	if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
		error("Destination address does not match LOAD_PHYSICAL_ADDR");
	if (virt_addr != LOAD_PHYSICAL_ADDR)
		error("Destination virtual address changed when not relocatable");
#endif

	debug_putstr("\nDecompressing Linux... ");
	__decompress(input_data, input_len, NULL, NULL, output, output_len,
			NULL, error);
	parse_elf(output);
	handle_relocations(output, output_len, virt_addr);
	debug_putstr("done.\nBooting the kernel.\n");

	/*
	 * Flush GHCB from cache and map it encrypted again when running as
	 * SEV-ES guest.
	 */
	sev_es_shutdown_ghcb();

	return output;
}

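/*
 * Called by the fortified string helpers when they detect a buffer
 * overflow; there is no printk here, so report the condition and halt
 * via error().
 */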
void fortify_panic(const char *name)
{
	error("detected buffer overflow");
}