// SPDX-License-Identifier: GPL-2.0
/*
 * ELF loader for kexec_file_load system call.
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <asm/ipl.h>
#include <asm/setup.h>

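/*
 * Add the PT_LOAD segments of the ELF kernel image as kexec buffers and
 * register each of them as a component of the IPL report.  The segment
 * that contains the entry point is recorded in @data so that the common
 * s390 kexec_file code can locate the kernel buffer and its parm area.
 */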
static int kexec_file_add_kernel_elf(struct kimage *image,
				     struct s390_load_data *data)
{
	struct kexec_buf buf;
	const Elf_Ehdr *ehdr;
	const Elf_Phdr *phdr;
	Elf_Addr entry;
	void *kernel;
	int i, ret;

	kernel = image->kernel_buf;
	ehdr = (Elf_Ehdr *)kernel;
	buf.image = image;
	if (image->type == KEXEC_TYPE_CRASH)
		entry = STARTUP_KDUMP_OFFSET;
	else
		entry = ehdr->e_entry;

	phdr = (void *)ehdr + ehdr->e_phoff;
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_type != PT_LOAD)
			continue;

		buf.buffer = kernel + phdr->p_offset;
		buf.bufsz = phdr->p_filesz;

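		/*
		 * Load the segment at its aligned physical address; a crash
		 * kernel is relocated into the reserved crashk_res region.
		 */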
		buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
		if (image->type == KEXEC_TYPE_CRASH)
			buf.mem += crashk_res.start;
		buf.memsz = phdr->p_memsz;
		data->memsz = ALIGN(data->memsz, phdr->p_align) + buf.memsz;

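		/*
		 * The segment containing the entry point is the kernel image
		 * itself; remember where it ends up so that the parm area at
		 * offset PARMAREA can be patched later.
		 */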
		if (entry - phdr->p_paddr < phdr->p_memsz) {
			data->kernel_buf = buf.buffer;
			data->kernel_mem = buf.mem;
			data->parm = buf.buffer + PARMAREA;
		}

		ipl_report_add_component(data->report, &buf,
					 IPL_RB_COMPONENT_FLAG_SIGNED |
					 IPL_RB_COMPONENT_FLAG_VERIFIED,
					 IPL_RB_CERT_UNKNOWN);
		ret = kexec_add_buffer(&buf);
		if (ret)
			return ret;
	}

	return data->memsz ? 0 : -EINVAL;
}

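/*
 * Validate the ELF header and program headers of the supplied kernel image
 * before handing it to the common component loader.  Only statically linked
 * 64-bit executables for this architecture are accepted.
 */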
static void *s390_elf_load(struct kimage *image,
			   char *kernel, unsigned long kernel_len,
			   char *initrd, unsigned long initrd_len,
			   char *cmdline, unsigned long cmdline_len)
{
	const Elf_Ehdr *ehdr;
	const Elf_Phdr *phdr;
	size_t size;
	int i;

	/* image->fops->probe already checked for valid ELF magic number. */
	ehdr = (Elf_Ehdr *)kernel;

	if (ehdr->e_type != ET_EXEC ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
	    !elf_check_arch(ehdr))
		return ERR_PTR(-EINVAL);

	if (!ehdr->e_phnum || ehdr->e_phentsize != sizeof(Elf_Phdr))
		return ERR_PTR(-EINVAL);

	size = ehdr->e_ehsize + ehdr->e_phoff;
	size += ehdr->e_phentsize * ehdr->e_phnum;
	if (size > kernel_len)
		return ERR_PTR(-EINVAL);

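	/*
	 * Reject dynamically linked images and make sure the (aligned)
	 * segment payloads fit into the buffer passed in from user space.
	 */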
	phdr = (void *)ehdr + ehdr->e_phoff;
	size = ALIGN(size, phdr->p_align);
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_type == PT_INTERP)
			return ERR_PTR(-EINVAL);

		if (phdr->p_offset > kernel_len)
			return ERR_PTR(-EINVAL);

		size += ALIGN(phdr->p_filesz, phdr->p_align);
	}

	if (size > kernel_len)
		return ERR_PTR(-EINVAL);

	return kexec_file_add_components(image, kexec_file_add_kernel_elf);
}

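/*
 * Called by the common kexec_file code to decide whether this loader can
 * handle the supplied image.
 */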
static int s390_elf_probe(const char *buf, unsigned long len)
{
	const Elf_Ehdr *ehdr;

	if (len < sizeof(Elf_Ehdr))
		return -ENOEXEC;

	ehdr = (Elf_Ehdr *)buf;

	/*
	 * Only check the ELF magic number here and do the full validity
	 * check in the loader.  If a check failed here, the erroneous ELF
	 * file would fall through to the image loader, which does not care
	 * what it gets, most likely causing behavior the user did not
	 * intend.
	 */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		return -ENOEXEC;

	return 0;
}

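/* Loader callbacks for ELF images, used by the s390 kexec_file_load code. */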
const struct kexec_file_ops s390_kexec_elf_ops = {
	.probe = s390_elf_probe,
	.load = s390_elf_load,
#ifdef CONFIG_KEXEC_SIG
	.verify_sig = s390_verify_sig,
#endif /* CONFIG_KEXEC_SIG */
};