^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Helper routines for building identity mapping page tables. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * included by both the compressed kernel and the regular kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) unsigned long addr, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) addr &= PMD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) for (; addr < end; addr += PMD_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) pmd_t *pmd = pmd_page + pmd_index(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) if (pmd_present(*pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) unsigned long addr, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) for (; addr < end; addr = next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) pud_t *pud = pud_page + pud_index(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) next = (addr & PUD_MASK) + PUD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) if (next > end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) next = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) if (info->direct_gbpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) pud_t pudval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) if (pud_present(*pud))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) addr &= PUD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) pudval = __pud((addr - info->offset) | info->page_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) set_pud(pud, pudval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) if (pud_present(*pud)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) pmd = pmd_offset(pud, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) ident_pmd_init(info, pmd, addr, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) pmd = (pmd_t *)info->alloc_pgt_page(info->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) if (!pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) ident_pmd_init(info, pmd, addr, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) unsigned long addr, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) for (; addr < end; addr = next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) p4d_t *p4d = p4d_page + p4d_index(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) next = (addr & P4D_MASK) + P4D_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) if (next > end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) next = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) if (p4d_present(*p4d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) pud = pud_offset(p4d, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) result = ident_pud_init(info, pud, addr, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) pud = (pud_t *)info->alloc_pgt_page(info->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) if (!pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) result = ident_pud_init(info, pud, addr, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
/*
 * Build page tables rooted at pgd_page mapping the physical range
 * [pstart, pend) at virtual = physical + info->offset (a plain identity
 * mapping when the offset is zero).
 *
 * Returns 0 on success, or -ENOMEM if info->alloc_pgt_page() fails.
 *
 * Note: info->kernpg_flag is normalized in place (defaulted and masked),
 * so the caller's x86_mapping_info is modified by this call.
 */
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	/* Walk in virtual addresses; the tables map them back by -offset. */
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		/* Advance to the next pgd boundary, clamped to 'end'. */
		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			/* Entry already points at a p4d table: just fill it. */
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		/* Populate the new table before making it visible in the pgd. */
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
		}
	}

	return 0;
}