// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 * Adapted from arch/arm64/kernel/efi.c
 */

#include <linux/efi.h>
#include <linux/init.h>

#include <asm/efi.h>
#include <asm/pgtable.h>
#include <asm/pgtable-bits.h>

/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable; everything else can be mapped non-executable (without
 * _PAGE_EXEC). Also take the optional RO/XP attributes from the EFI
 * memory map into account.
 */
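
/*
 * For illustration (hypothetical descriptors, not taken from any real
 * firmware memory map): a descriptor with EFI_MEMORY_RO | EFI_MEMORY_XP
 * hits the R-- branch and gets PAGE_KERNEL_READ; one with only
 * EFI_MEMORY_RO gets PAGE_KERNEL_READ_EXEC (R-X); an
 * EFI_RUNTIME_SERVICES_DATA region with no RO/XP attributes gets
 * PAGE_KERNEL (RW-); only EFI_RUNTIME_SERVICES_CODE without XP falls
 * through to PAGE_KERNEL_EXEC (RWX).
 */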
static __init pgprot_t efimem_to_pgprot_map(efi_memory_desc_t *md)
{
	u64 attr = md->attribute;
	u32 type = md->type;

	if (type == EFI_MEMORY_MAPPED_IO)
		return PAGE_KERNEL;

	/* R-- */
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return PAGE_KERNEL_READ;

	/* R-X */
	if (attr & EFI_MEMORY_RO)
		return PAGE_KERNEL_READ_EXEC;

	/* RW- */
	if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
	     EFI_MEMORY_XP) ||
	    type != EFI_RUNTIME_SERVICES_CODE)
		return PAGE_KERNEL;

	/* RWX */
	return PAGE_KERNEL_EXEC;
}

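/*
 * Map one EFI memory descriptor into the EFI page tables.
 *
 * Two details worth spelling out (explanatory notes, not taken from the
 * original sources): the mapping is created page by page because EFI
 * memory descriptors count 4 KiB EFI pages, which matches the RISC-V
 * base PAGE_SIZE; and _PAGE_GLOBAL is masked out, presumably because
 * these entries belong to the dedicated EFI mm that is switched in only
 * around runtime-service calls and must not linger in the TLB as global
 * translations.
 */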
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	pgprot_t prot = __pgprot(pgprot_val(efimem_to_pgprot_map(md)) &
				 ~(_PAGE_GLOBAL));
	int i;

	/* RISC-V maps one page at a time */
	for (i = 0; i < md->num_pages; i++)
		create_pgd_mapping(mm->pgd, md->virt_addr + i * PAGE_SIZE,
				   md->phys_addr + i * PAGE_SIZE,
				   PAGE_SIZE, prot);
	return 0;
}

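/*
 * Callback for apply_to_page_range(): invoked once per PTE in the
 * region, it tightens the existing permissions according to the
 * descriptor's EFI_MEMORY_RO / EFI_MEMORY_XP attributes.
 */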
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
	efi_memory_desc_t *md = data;
	pte_t pte = READ_ONCE(*ptep);
	unsigned long val;

	if (md->attribute & EFI_MEMORY_RO) {
		val = pte_val(pte) & ~_PAGE_WRITE;
		val |= _PAGE_READ;
		pte = __pte(val);
	}
	if (md->attribute & EFI_MEMORY_XP) {
		val = pte_val(pte) & ~_PAGE_EXEC;
		pte = __pte(val);
	}
	set_pte(ptep, pte);

	return 0;
}

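/*
 * Tighten the permissions of an already-mapped runtime region to match
 * a Memory Attributes table entry. This is the architecture hook handed
 * to the generic Memory Attributes table code
 * (efi_memattr_apply_permissions()), which walks the table and passes
 * each qualifying descriptor down here.
 */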
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, md);
}
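
/*
 * Rough sketch of how these hooks are expected to be driven at boot
 * (hypothetical caller, modelled on drivers/firmware/efi/riscv-runtime.c
 * and drivers/firmware/efi/memattr.c; not part of this file):
 *
 *	for_each_efi_memory_desc(md)
 *		efi_create_mapping(&efi_mm, md);	// build efi_mm tables
 *
 *	efi_memattr_apply_permissions(&efi_mm,
 *				      efi_set_mapping_permissions);
 *
 * i.e. every runtime region is first mapped page by page, and the
 * Memory Attributes table (if present) is then used to drop write or
 * execute permission where the firmware says that is safe.
 */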