^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * kexec: kexec_file_load system call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2014 Red Hat Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Vivek Goyal <vgoyal@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/capability.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/kexec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/ima.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <crypto/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <crypto/sha.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/elf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/elfcore.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/kernel_read_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include "kexec_internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) static int kexec_calculate_store_digests(struct kimage *image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
/*
 * Currently this is the only default function that is exported, as some
 * architectures need it to do additional handling.
 * In the future, other default functions may be exported too if required.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) int kexec_image_probe_default(struct kimage *image, void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) unsigned long buf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) const struct kexec_file_ops * const *fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) int ret = -ENOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) ret = (*fops)->probe(buf, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) image->fops = *fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
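/*
 * Illustrative sketch (not part of the upstream file): an architecture
 * participates in kexec_file_load by exporting a NULL-terminated
 * kexec_file_loaders[] array whose entries supply the probe/load/cleanup
 * hooks used by the default helpers in this file. All names below are
 * examples only:
 *
 *	static const struct kexec_file_ops my_image_ops = {
 *		.probe   = my_image_probe,
 *		.load    = my_image_load,
 *		.cleanup = my_image_cleanup,
 *	};
 *
 *	const struct kexec_file_ops * const kexec_file_loaders[] = {
 *		&my_image_ops,
 *		NULL
 *	};
 */
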
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) /* Architectures can provide this probe function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) unsigned long buf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) return kexec_image_probe_default(image, buf, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static void *kexec_image_load_default(struct kimage *image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) if (!image->fops || !image->fops->load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) return ERR_PTR(-ENOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) return image->fops->load(image, image->kernel_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) image->kernel_buf_len, image->initrd_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) image->initrd_buf_len, image->cmdline_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) image->cmdline_buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) void * __weak arch_kexec_kernel_image_load(struct kimage *image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) return kexec_image_load_default(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) int kexec_image_post_load_cleanup_default(struct kimage *image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) if (!image->fops || !image->fops->cleanup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) return image->fops->cleanup(image->image_loader_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) return kexec_image_post_load_cleanup_default(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #ifdef CONFIG_KEXEC_SIG
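/*
 * Illustrative note: a loader's ->verify_sig() hook is expected to hand the
 * whole image to the platform's signature checker (on x86, for instance,
 * Authenticode verification of a signed bzImage) and return 0 only when the
 * signature chains to a trusted key.
 */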
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) unsigned long buf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) if (!image->fops || !image->fops->verify_sig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) pr_debug("kernel loader does not support signature verification.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) return -EKEYREJECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) return image->fops->verify_sig(buf, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) unsigned long buf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) return kexec_image_verify_sig_default(image, buf, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * arch_kexec_apply_relocations_add - apply relocations of type RELA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * @pi: Purgatory to be relocated.
 * @section:	Section to which the relocations apply.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * @relsec: Section containing RELAs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * @symtab: Corresponding symtab.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * Return: 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) int __weak
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) const Elf_Shdr *relsec, const Elf_Shdr *symtab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) pr_err("RELA relocation unsupported.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) return -ENOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * arch_kexec_apply_relocations - apply relocations of type REL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * @pi: Purgatory to be relocated.
 * @section:	Section to which the relocations apply.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) * @relsec: Section containing RELs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * @symtab: Corresponding symtab.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * Return: 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) int __weak
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) const Elf_Shdr *relsec, const Elf_Shdr *symtab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) pr_err("REL relocation unsupported.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return -ENOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
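
/*
 * Illustrative sketch (not part of the upstream file): an architecture that
 * overrides arch_kexec_apply_relocations_add() typically walks the RELA
 * entries in @relsec and patches @section inside the purgatory buffer,
 * roughly along these lines (symbol_value() and apply_one_reloc() are
 * placeholders for arch-specific code, not real APIs):
 *
 *	Elf_Rela *relas = (void *)pi->ehdr + relsec->sh_offset;
 *	int i;
 *
 *	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
 *		unsigned long addr  = section->sh_addr + relas[i].r_offset;
 *		unsigned long value = symbol_value(pi, symtab, &relas[i]) +
 *				      relas[i].r_addend;
 *
 *		apply_one_reloc(pi, section, addr, value, &relas[i]);
 *	}
 */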
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
/*
 * Free up memory used by the kernel, initrd, and command line. These are
 * temporary allocations that are no longer needed once the buffers have
 * been loaded into separate segments and copied elsewhere.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) void kimage_file_post_load_cleanup(struct kimage *image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) struct purgatory_info *pi = &image->purgatory_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) vfree(image->kernel_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) image->kernel_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) vfree(image->initrd_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) image->initrd_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) kfree(image->cmdline_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) image->cmdline_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) vfree(pi->purgatory_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) pi->purgatory_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) vfree(pi->sechdrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) pi->sechdrs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) #ifdef CONFIG_IMA_KEXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) vfree(image->ima_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) image->ima_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) #endif /* CONFIG_IMA_KEXEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) /* See if architecture has anything to cleanup post load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) arch_kimage_file_post_load_cleanup(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
	/*
	 * The call above should have given the image loader a chance to
	 * free any data stored in kimage->image_loader_data. It is now
	 * safe to free the pointer itself.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) kfree(image->image_loader_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) image->image_loader_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #ifdef CONFIG_KEXEC_SIG
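/*
 * kimage_validate_signature - check the signature on the kernel image.
 *
 * With CONFIG_KEXEC_SIG_FORCE a verification failure is fatal. Otherwise a
 * failure is tolerated, unless the kernel is locked down and IMA will not
 * appraise the image either, in which case -EPERM is returned.
 */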
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) kimage_validate_signature(struct kimage *image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) image->kernel_buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) pr_notice("Enforced kernel signature verification failed (%d).\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * If IMA is guaranteed to appraise a signature on the kexec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * image, permit it even if the kernel is otherwise locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) if (!ima_appraise_signature(READING_KEXEC_IMAGE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) security_locked_down(LOCKDOWN_KEXEC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) pr_debug("kernel signature verification failed (%d).\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
/*
 * In file mode, the list of segments is prepared by the kernel. Copy the
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) const char __user *cmdline_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) unsigned long cmdline_len, unsigned flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) void *ldata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) ret = kernel_read_file_from_fd(kernel_fd, 0, &image->kernel_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) INT_MAX, NULL, READING_KEXEC_IMAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) image->kernel_buf_len = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) /* Call arch image probe handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) image->kernel_buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) #ifdef CONFIG_KEXEC_SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) ret = kimage_validate_signature(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) #endif
	/* It is possible that no initramfs is being loaded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) ret = kernel_read_file_from_fd(initrd_fd, 0, &image->initrd_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) INT_MAX, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) READING_KEXEC_INITRAMFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) image->initrd_buf_len = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (cmdline_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) image->cmdline_buf = memdup_user(cmdline_ptr, cmdline_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) if (IS_ERR(image->cmdline_buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) ret = PTR_ERR(image->cmdline_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) image->cmdline_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) image->cmdline_buf_len = cmdline_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
		/* The command line should be a string whose last byte is NUL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) if (image->cmdline_buf[cmdline_len - 1] != '\0') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) ima_kexec_cmdline(kernel_fd, image->cmdline_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) image->cmdline_buf_len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) /* IMA needs to pass the measurement list to the next kernel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) ima_add_kexec_buffer(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) /* Call arch image load handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) ldata = arch_kexec_kernel_image_load(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) if (IS_ERR(ldata)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) ret = PTR_ERR(ldata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) image->image_loader_data = ldata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) /* In case of error, free up all allocated memory in this function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) kimage_file_post_load_cleanup(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) int initrd_fd, const char __user *cmdline_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) unsigned long cmdline_len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) struct kimage *image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) image = do_kimage_alloc_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (!image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) image->file_mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) if (kexec_on_panic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) /* Enable special crash kernel control page alloc policy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) image->control_page = crashk_res.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) image->type = KEXEC_TYPE_CRASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) cmdline_ptr, cmdline_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) goto out_free_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) ret = sanity_check_segment_list(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) goto out_free_post_load_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) image->control_code_page = kimage_alloc_control_pages(image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) get_order(KEXEC_CONTROL_PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) if (!image->control_code_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) pr_err("Could not allocate control_code_buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) goto out_free_post_load_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) if (!kexec_on_panic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) image->swap_page = kimage_alloc_control_pages(image, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) if (!image->swap_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) pr_err("Could not allocate swap buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) goto out_free_control_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) *rimage = image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) out_free_control_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) kimage_free_page_list(&image->control_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) out_free_post_load_bufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) kimage_file_post_load_cleanup(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) out_free_image:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) kfree(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) unsigned long, cmdline_len, const char __user *, cmdline_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) unsigned long, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) struct kimage **dest_image, *image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) /* We only trust the superuser with rebooting the system. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) /* Make sure we have a legal set of flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) if (flags != (flags & KEXEC_FILE_FLAGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) image = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) if (!mutex_trylock(&kexec_mutex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) dest_image = &kexec_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) if (flags & KEXEC_FILE_ON_CRASH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) dest_image = &kexec_crash_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) if (kexec_crash_image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) arch_kexec_unprotect_crashkres();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) if (flags & KEXEC_FILE_UNLOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) goto exchange;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
	/*
	 * In case of a crash kernel load, the new kernel is loaded into the
	 * reserved region, which is the same memory where an old crash
	 * kernel might already be loaded. Free any current crash dump
	 * kernel before we corrupt it.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if (flags & KEXEC_FILE_ON_CRASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) kimage_free(xchg(&kexec_crash_image, NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) cmdline_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) ret = machine_kexec_prepare(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(), so we must copy the vmcoreinfo data
	 * after that call.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) ret = kimage_crash_copy_vmcoreinfo(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) ret = kexec_calculate_store_digests(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) for (i = 0; i < image->nr_segments; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) struct kexec_segment *ksegment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) ksegment = &image->segment[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) i, ksegment->buf, ksegment->bufsz, ksegment->mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) ksegment->memsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) ret = kimage_load_segment(image, &image->segment[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) kimage_terminate(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) ret = machine_kexec_post_load(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
	/*
	 * Free up any temporary buffers that are no longer needed after
	 * the image has been loaded.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) kimage_file_post_load_cleanup(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) exchange:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) image = xchg(dest_image, image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) arch_kexec_protect_crashkres();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) mutex_unlock(&kexec_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) kimage_free(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) }
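
/*
 * Illustrative user-space usage (not part of the upstream file). Assuming a
 * libc without a dedicated wrapper, a normal (non-crash) load looks roughly
 * like this; the paths and command line are examples only:
 *
 *	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
 *	int initrd_fd = open("/boot/initrd.img", O_RDONLY);
 *	const char *cmdline = "root=/dev/sda1 console=ttyS0";
 *
 *	long ret = syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
 *			   strlen(cmdline) + 1, cmdline, 0UL);
 *
 * Note that cmdline_len must include the trailing NUL byte, which is what
 * kimage_file_prepare_segments() checks for above.
 */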
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) struct kexec_buf *kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) struct kimage *image = kbuf->image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) unsigned long temp_start, temp_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) temp_end = min(end, kbuf->buf_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) temp_start = temp_end - kbuf->memsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) /* align down start */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) temp_start = temp_start & (~(kbuf->buf_align - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (temp_start < start || temp_start < kbuf->buf_min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) temp_end = temp_start + kbuf->memsz - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
		/*
		 * Make sure this does not conflict with any existing
		 * segments.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) if (kimage_is_destination_range(image, temp_start, temp_end)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) temp_start = temp_start - PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) /* We found a suitable memory range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) /* If we are here, we found a suitable memory range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) kbuf->mem = temp_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) /* Success, stop navigating through remaining System RAM ranges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) struct kexec_buf *kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) struct kimage *image = kbuf->image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) unsigned long temp_start, temp_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) temp_start = max(start, kbuf->buf_min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) temp_start = ALIGN(temp_start, kbuf->buf_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) temp_end = temp_start + kbuf->memsz - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (temp_end > end || temp_end > kbuf->buf_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) return 0;
		/*
		 * Make sure this does not conflict with any existing
		 * segments.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) if (kimage_is_destination_range(image, temp_start, temp_end)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) temp_start = temp_start + PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /* We found a suitable memory range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) /* If we are here, we found a suitable memory range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) kbuf->mem = temp_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) /* Success, stop navigating through remaining System RAM ranges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
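
/*
 * Both hole-finding helpers return 1 to stop the surrounding walk over
 * System RAM ranges once a suitable hole has been found (its start is left
 * in kbuf->mem), and 0 to let the walk continue with the next range.
 */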
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) static int locate_mem_hole_callback(struct resource *res, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct kexec_buf *kbuf = (struct kexec_buf *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) u64 start = res->start, end = res->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) unsigned long sz = end - start + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
	/* Returning 0 makes the walk move on to the next memory range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) /* Don't use memory that will be detected and handled by a driver. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (sz < kbuf->memsz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) if (end < kbuf->buf_min || start > kbuf->buf_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
	/*
	 * Allocate memory top-down within the RAM range if requested;
	 * otherwise allocate bottom-up.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) if (kbuf->top_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) return locate_mem_hole_top_down(start, end, kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) return locate_mem_hole_bottom_up(start, end, kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) #ifdef CONFIG_ARCH_KEEP_MEMBLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) static int kexec_walk_memblock(struct kexec_buf *kbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) int (*func)(struct resource *, void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) u64 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) phys_addr_t mstart, mend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) struct resource res = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) if (kbuf->image->type == KEXEC_TYPE_CRASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) return func(&crashk_res, kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if (kbuf->top_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) &mstart, &mend, NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) * In memblock, end points to the first byte after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) * range while in kexec, end points to the last byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) * in the range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) res.start = mstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) res.end = mend - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) ret = func(&res, kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) &mstart, &mend, NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) * In memblock, end points to the first byte after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) * range while in kexec, end points to the last byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) * in the range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) res.start = mstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) res.end = mend - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) ret = func(&res, kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) static int kexec_walk_memblock(struct kexec_buf *kbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) int (*func)(struct resource *, void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) * kexec_walk_resources - call func(data) on free memory regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * @kbuf: Context info for the search. Also passed to @func.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * @func: Function to call for each memory region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) * Return: The memory walk will stop when func returns a non-zero value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) * and that value will be returned. If all free regions are visited without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) * func returning non-zero, then zero will be returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) static int kexec_walk_resources(struct kexec_buf *kbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) int (*func)(struct resource *, void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (kbuf->image->type == KEXEC_TYPE_CRASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) return walk_iomem_res_desc(crashk_res.desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) crashk_res.start, crashk_res.end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) kbuf, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) * @kbuf: Parameters for the memory search.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) * On success, kbuf->mem will have the start address of the memory region found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) * Return: 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) int kexec_locate_mem_hole(struct kexec_buf *kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
	/* The arch already knows where to place the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) if (!IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) ret = kexec_walk_resources(kbuf, locate_mem_hole_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) ret = kexec_walk_memblock(kbuf, locate_mem_hole_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) return ret == 1 ? 0 : -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * arch_kexec_locate_mem_hole - Find free memory to place the segments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) * @kbuf: Parameters for the memory search.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * On success, kbuf->mem will have the start address of the memory region found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * Return: 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) return kexec_locate_mem_hole(kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * kexec_add_buffer - place a buffer in a kexec segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * @kbuf: Buffer contents and memory parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * This function assumes that kexec_mutex is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * On successful return, @kbuf->mem will have the physical address of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * the buffer in memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * Return: 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) int kexec_add_buffer(struct kexec_buf *kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) struct kexec_segment *ksegment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
	/* Currently, adding a segment this way is allowed only in file mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) if (!kbuf->image->file_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
	/*
	 * Make sure we are not trying to add a buffer after the control
	 * pages have been allocated. All segments must be placed before
	 * any control pages are allocated, because the control page
	 * allocation logic walks the list of segments to make sure there
	 * are no destination overlaps.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) if (!list_empty(&kbuf->image->control_pages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) /* Ensure minimum alignment needed for segments. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) /* Walk the RAM ranges and allocate a suitable range for the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) ret = arch_kexec_locate_mem_hole(kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) /* Found a suitable memory range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) ksegment->kbuf = kbuf->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) ksegment->bufsz = kbuf->bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) ksegment->mem = kbuf->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) ksegment->memsz = kbuf->memsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) kbuf->image->nr_segments++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
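
/*
 * Illustrative sketch (not part of the upstream file): an architecture
 * loader typically fills a struct kexec_buf on the stack and lets
 * kexec_add_buffer() pick the destination, e.g. for an initrd-style
 * payload (variable names are examples only):
 *
 *	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
 *				  .buf_max = ULONG_MAX, .top_down = false,
 *				  .buf_align = PAGE_SIZE,
 *				  .mem = KEXEC_BUF_MEM_UNKNOWN };
 *
 *	kbuf.buffer = image->initrd_buf;
 *	kbuf.bufsz = kbuf.memsz = image->initrd_buf_len;
 *	ret = kexec_add_buffer(&kbuf);
 *	if (!ret)
 *		initrd_load_addr = kbuf.mem;
 */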
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
/* Calculate and store the digest of the segments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) static int kexec_calculate_store_digests(struct kimage *image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) struct crypto_shash *tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) struct shash_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) int ret = 0, i, j, zero_buf_sz, sha_region_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) size_t desc_size, nullsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) char *digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) void *zero_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) struct kexec_sha_region *sha_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) struct purgatory_info *pi = &image->purgatory_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) zero_buf_sz = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) tfm = crypto_alloc_shash("sha256", 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (IS_ERR(tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) ret = PTR_ERR(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) desc = kzalloc(desc_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) goto out_free_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) sha_regions = vzalloc(sha_region_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (!sha_regions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) goto out_free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) desc->tfm = tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) ret = crypto_shash_init(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) goto out_free_sha_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (!digest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) goto out_free_sha_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) for (j = i = 0; i < image->nr_segments; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct kexec_segment *ksegment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) ksegment = &image->segment[i];
		/*
		 * Skip the purgatory segment as it will be modified once we
		 * store the digest info in it.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) if (ksegment->kbuf == pi->purgatory_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ret = crypto_shash_update(desc, ksegment->kbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) ksegment->bufsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
		/*
		 * Assume the rest of the buffer is filled with zeroes and
		 * update the digest accordingly.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) nullsz = ksegment->memsz - ksegment->bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) while (nullsz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) unsigned long bytes = nullsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (bytes > zero_buf_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) bytes = zero_buf_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) ret = crypto_shash_update(desc, zero_buf, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) nullsz -= bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) sha_regions[j].start = ksegment->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) sha_regions[j].len = ksegment->memsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) ret = crypto_shash_final(desc, digest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) goto out_free_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) sha_regions, sha_region_sz, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) goto out_free_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) digest, SHA256_DIGEST_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) goto out_free_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) out_free_digest:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) kfree(digest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) out_free_sha_regions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) vfree(sha_regions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) out_free_desc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) out_free_tfm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) kfree(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) #ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * @pi: Purgatory to be loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * @kbuf: Buffer to setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * Allocates the memory needed for the buffer. The caller is responsible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * for freeing the memory after use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * Return: 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct kexec_buf *kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) const Elf_Shdr *sechdrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) unsigned long bss_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) unsigned long bss_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) unsigned long align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) kbuf->buf_align = bss_align = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) kbuf->bufsz = bss_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
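^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * Walk the section headers and accumulate the size and alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * needed for all SHF_ALLOC sections. SHT_NOBITS (.bss) sections are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * accounted separately: they occupy memory in the loaded segment but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * take no space in the buffer that is copied in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) */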
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) for (i = 0; i < pi->ehdr->e_shnum; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (!(sechdrs[i].sh_flags & SHF_ALLOC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) align = sechdrs[i].sh_addralign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (sechdrs[i].sh_type != SHT_NOBITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (kbuf->buf_align < align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) kbuf->buf_align = align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) kbuf->bufsz = ALIGN(kbuf->bufsz, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) kbuf->bufsz += sechdrs[i].sh_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (bss_align < align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) bss_align = align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) bss_sz = ALIGN(bss_sz, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) bss_sz += sechdrs[i].sh_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) kbuf->memsz = kbuf->bufsz + bss_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (kbuf->buf_align < bss_align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) kbuf->buf_align = bss_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
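^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * Only bufsz bytes are allocated and filled; the bss exists only as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * the extra memsz reserved in the target segment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) */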
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) kbuf->buffer = vzalloc(kbuf->bufsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (!kbuf->buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) pi->purgatory_buf = kbuf->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ret = kexec_add_buffer(kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) vfree(pi->purgatory_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) pi->purgatory_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * @pi: Purgatory to be loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * @kbuf: Buffer prepared to store purgatory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * Allocates the memory needed for the buffer. The caller is responsible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * for freeing the memory after use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * Return: 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct kexec_buf *kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) unsigned long bss_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) Elf_Shdr *sechdrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * The section headers in kexec_purgatory are read-only. In order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * make them modifiable, work on a temporary copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) sechdrs = vzalloc(array_size(sizeof(Elf_Shdr), pi->ehdr->e_shnum));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!sechdrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pi->ehdr->e_shnum * sizeof(Elf_Shdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) pi->sechdrs = sechdrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
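^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * Lay out the sections inside the already-allocated purgatory buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * "offset" tracks the copied (non-bss) part, while bss sections are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * placed after it, starting at bss_addr. image->start initially holds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * the link-time entry point and is rebased below once the section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * containing it gets its final load address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) */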
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) bss_addr = kbuf->mem + kbuf->bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) kbuf->image->start = pi->ehdr->e_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) for (i = 0; i < pi->ehdr->e_shnum; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) unsigned long align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) void *src, *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (!(sechdrs[i].sh_flags & SHF_ALLOC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) align = sechdrs[i].sh_addralign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (sechdrs[i].sh_type == SHT_NOBITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) bss_addr = ALIGN(bss_addr, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) sechdrs[i].sh_addr = bss_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) bss_addr += sechdrs[i].sh_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) offset = ALIGN(offset, align);
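^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * If the entry point lies inside this executable section, rebase it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * from the section's link-time address to its final load address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */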
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) pi->ehdr->e_entry < (sechdrs[i].sh_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) + sechdrs[i].sh_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) kbuf->image->start -= sechdrs[i].sh_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) kbuf->image->start += kbuf->mem + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) src = (void *)pi->ehdr + sechdrs[i].sh_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) dst = pi->purgatory_buf + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) memcpy(dst, src, sechdrs[i].sh_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) sechdrs[i].sh_addr = kbuf->mem + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) sechdrs[i].sh_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) offset += sechdrs[i].sh_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) static int kexec_apply_relocations(struct kimage *image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct purgatory_info *pi = &image->purgatory_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) const Elf_Shdr *sechdrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
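^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * Walk all section headers; for each relocation section, validate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * section indices it refers to and hand it to the architecture's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * relocation handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */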
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) for (i = 0; i < pi->ehdr->e_shnum; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) const Elf_Shdr *relsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) const Elf_Shdr *symtab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) Elf_Shdr *section;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) relsec = sechdrs + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (relsec->sh_type != SHT_RELA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) relsec->sh_type != SHT_REL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * For sections of type SHT_RELA/SHT_REL, ->sh_link contains the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * section header index of the associated symbol table, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * ->sh_info contains the section header index of the section to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * which the relocations apply.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (relsec->sh_info >= pi->ehdr->e_shnum ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) relsec->sh_link >= pi->ehdr->e_shnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return -ENOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) section = pi->sechdrs + relsec->sh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) symtab = sechdrs + relsec->sh_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (!(section->sh_flags & SHF_ALLOC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * symtab->sh_link contains the section header index of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * associated string table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (symtab->sh_link >= pi->ehdr->e_shnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* Invalid section number? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * The respective architecture must provide support for applying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * relocations of type SHT_RELA/SHT_REL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (relsec->sh_type == SHT_RELA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ret = arch_kexec_apply_relocations_add(pi, section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) relsec, symtab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) else if (relsec->sh_type == SHT_REL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ret = arch_kexec_apply_relocations(pi, section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) relsec, symtab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * kexec_load_purgatory - Load and relocate the purgatory object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * @image: Image to add the purgatory to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * @kbuf: Memory parameters to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * Allocates the memory needed for image->purgatory_info.sechdrs and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * image->purgatory_info.purgatory_buf/kbuf->buffer. The caller is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * responsible for freeing the memory after use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * Return: 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct purgatory_info *pi = &image->purgatory_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (kexec_purgatory_size <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) pi->ehdr = (const Elf_Ehdr *)kexec_purgatory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) ret = kexec_purgatory_setup_kbuf(pi, kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ret = kexec_purgatory_setup_sechdrs(pi, kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) goto out_free_kbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ret = kexec_apply_relocations(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) vfree(pi->sechdrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) pi->sechdrs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) out_free_kbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) vfree(pi->purgatory_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) pi->purgatory_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * kexec_purgatory_find_symbol - find a symbol in the purgatory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * @pi: Purgatory to search in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * @name: Name of the symbol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Return: pointer to symbol in read-only symtab on success, NULL on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) const Elf_Shdr *sechdrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) const Elf_Ehdr *ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) const Elf_Sym *syms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) const char *strtab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) int i, k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (!pi->ehdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ehdr = pi->ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) sechdrs = (void *)ehdr + ehdr->e_shoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
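^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * Scan every symbol table for a global symbol with the requested name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * that refers to a valid, defined section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) */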
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) for (i = 0; i < ehdr->e_shnum; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (sechdrs[i].sh_type != SHT_SYMTAB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (sechdrs[i].sh_link >= ehdr->e_shnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /* Invalid strtab section number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) syms = (void *)ehdr + sechdrs[i].sh_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /* Go through symbols for a match */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (strcmp(strtab + syms[k].st_name, name) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (syms[k].st_shndx == SHN_UNDEF ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) syms[k].st_shndx >= ehdr->e_shnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) pr_debug("Symbol: %s has bad section index %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) name, syms[k].st_shndx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /* Found the symbol we are looking for */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return &syms[k];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct purgatory_info *pi = &image->purgatory_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) const Elf_Sym *sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) Elf_Shdr *sechdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) sym = kexec_purgatory_find_symbol(pi, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (!sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) sechdr = &pi->sechdrs[sym->st_shndx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * Return the address where the symbol will finally be loaded after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * kexec_load_segment().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return (void *)(sechdr->sh_addr + sym->st_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * Get or set the value of a symbol. If "get_value" is true, the symbol value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * is returned in buf; otherwise, the symbol value is set from the value in buf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) void *buf, unsigned int size, bool get_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct purgatory_info *pi = &image->purgatory_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) const Elf_Sym *sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) Elf_Shdr *sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) char *sym_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) sym = kexec_purgatory_find_symbol(pi, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (!sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (sym->st_size != size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) pr_err("symbol %s size mismatch: expected %lu actual %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) name, (unsigned long)sym->st_size, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) sec = pi->sechdrs + sym->st_shndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (sec->sh_type == SHT_NOBITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) pr_err("symbol %s is in a bss section. Cannot %s\n", name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) get_value ? "get" : "set");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
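^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * sh_offset was rewritten by kexec_purgatory_setup_sechdrs() to be the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * section's offset within purgatory_buf, so this points at the symbol's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * staging copy that will later be loaded to its final address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) */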
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (get_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) memcpy((void *)buf, sym_buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) memcpy((void *)sym_buf, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
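^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * A minimal, illustrative sketch of how an architecture image loader is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * expected to use the purgatory helpers above. The kexec_buf placement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * limits and the "arch_param" symbol are hypothetical; the kexec_* calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * are the interfaces defined in this file:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *				  .buf_max = ULONG_MAX, .top_down = true };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *	ret = kexec_load_purgatory(image, &kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *	ret = kexec_purgatory_get_set_symbol(image, "arch_param", &param,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *					     sizeof(param), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) */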
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) #endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) int crash_exclude_mem_range(struct crash_mem *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) unsigned long long mstart, unsigned long long mend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) unsigned long long start, end, p_start, p_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) struct crash_mem_range temp_range = {0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
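^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * Walk the ranges and clamp the region to be excluded to each of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * A fully covered range is dropped, a partially covered one is trimmed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * and a range with the excluded region in its middle is split; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * upper half of a split is kept in temp_range and inserted below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) */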
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) for (i = 0; i < mem->nr_ranges; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) start = mem->ranges[i].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) end = mem->ranges[i].end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) p_start = mstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) p_end = mend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (mstart > end || mend < start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) /* Truncate any area outside of range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (mstart < start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) p_start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (mend > end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) p_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* The excluded region completely covers this range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (p_start == start && p_end == end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) mem->ranges[i].start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) mem->ranges[i].end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (i < mem->nr_ranges - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /* Shift the rest of the ranges to the left */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) for (j = i; j < mem->nr_ranges - 1; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) mem->ranges[j].start =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) mem->ranges[j+1].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) mem->ranges[j].end =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) mem->ranges[j+1].end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * Continue checking from the current position for further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * overlapping ranges, since the ranges above have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * shifted down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) mem->nr_ranges--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) mem->nr_ranges--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (p_start > start && p_end < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* Split original range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) mem->ranges[i].end = p_start - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) temp_range.start = p_end + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) temp_range.end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) } else if (p_start != start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) mem->ranges[i].end = p_start - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) mem->ranges[i].start = p_end + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* If no split happened, we are done; otherwise add the new range to the array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (!temp_range.end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /* A split happened; fail if there is no room for the new range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (i == mem->max_nr_ranges - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /* Location where new range should go */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) j = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (j < mem->nr_ranges) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /* Move over all ranges one slot towards the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) for (i = mem->nr_ranges - 1; i >= j; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) mem->ranges[i + 1] = mem->ranges[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) mem->ranges[j].start = temp_range.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) mem->ranges[j].end = temp_range.end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) mem->nr_ranges++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) void **addr, unsigned long *sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) Elf64_Ehdr *ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) Elf64_Phdr *phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) unsigned char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) unsigned int cpu, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) unsigned long long notes_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) unsigned long mstart, mend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /* one PT_NOTE phdr per possible CPU for crash notes, plus one for the vmcoreinfo ELF note */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) nr_phdr = nr_cpus + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) nr_phdr += mem->nr_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * kexec-tools creates an extra PT_LOAD phdr for the kernel text mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * which appears to be required by tools like gdb. As a result, the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * physical memory is mapped by two program headers: one with kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * text virtual addresses and the other with __va(physical) addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) nr_phdr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
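^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * The headers built here are handed back to the caller, which typically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * loads them as the elfcorehdr segment for the crash kernel; hence the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * ELF_CORE_HEADER_ALIGN-ed size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) */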
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) buf = vzalloc(elf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) ehdr = (Elf64_Ehdr *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) phdr = (Elf64_Phdr *)(ehdr + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) ehdr->e_ident[EI_CLASS] = ELFCLASS64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) ehdr->e_ident[EI_VERSION] = EV_CURRENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) ehdr->e_ident[EI_OSABI] = ELF_OSABI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) ehdr->e_type = ET_CORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) ehdr->e_machine = ELF_ARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) ehdr->e_version = EV_CURRENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) ehdr->e_phoff = sizeof(Elf64_Ehdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) ehdr->e_ehsize = sizeof(Elf64_Ehdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) ehdr->e_phentsize = sizeof(Elf64_Phdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* Prepare one phdr of type PT_NOTE for each present CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) for_each_present_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) phdr->p_type = PT_NOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) phdr->p_offset = phdr->p_paddr = notes_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) (ehdr->e_phnum)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) phdr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) /* Prepare one PT_NOTE header for vmcoreinfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) phdr->p_type = PT_NOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) (ehdr->e_phnum)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) phdr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /* Prepare PT_LOAD type program header for kernel text region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (kernel_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) phdr->p_type = PT_LOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) phdr->p_flags = PF_R|PF_W|PF_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) phdr->p_vaddr = (unsigned long) _text;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) phdr->p_filesz = phdr->p_memsz = _end - _text;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ehdr->e_phnum++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) phdr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /* Go through all the ranges in mem->ranges[] and prepare one PT_LOAD phdr for each */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) for (i = 0; i < mem->nr_ranges; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) mstart = mem->ranges[i].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) mend = mem->ranges[i].end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) phdr->p_type = PT_LOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) phdr->p_flags = PF_R|PF_W|PF_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) phdr->p_offset = mstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) phdr->p_paddr = mstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) phdr->p_vaddr = (unsigned long) __va(mstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) phdr->p_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) ehdr->e_phnum++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) pr_debug("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) ehdr->e_phnum, phdr->p_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) phdr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) *addr = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) *sz = elf_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }