// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/suspend.h>

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/*
 * The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);

		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/*
			 * Map with big pages if possible, otherwise create
			 * normal page tables.
			 *
			 * NOTE: We can mark everything as executable here.
			 */
			if (boot_cpu_has(X86_FEATURE_PSE)) {
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	return 0;
}

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}

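/*
 * Set up a temporary mapping for the single page that contains the image
 * kernel's entry point (restore_jump_address / jump_address_phys), so that
 * the jump into the restored kernel still hits mapped text after the switch
 * to the temporary page tables.
 */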
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		set_pmd(pmd + pmd_index(restore_jump_address),
			__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;
		set_pte(pte + pte_index(restore_jump_address),
			__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}

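/*
 * Build the temporary page tables on resume-safe pages, relocate the
 * low-level restore code and finally call restore_image() to copy the
 * hibernation image back into place and pass control to the image kernel.
 */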
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);

	error = set_up_temporary_text_mapping(resume_pg_dir);
	if (error)
		return error;

	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

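	/*
	 * Hand the physical address of the temporary page tables to the
	 * low-level restore code, which switches to them before copying
	 * the image data back.
	 */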
	temp_pgt = __pa(resume_pg_dir);

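	/*
	 * Copy the page-copying routine to a resume-safe page so that it
	 * cannot be overwritten while the image is being restored.
	 */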
	error = relocate_restore_code();
	if (error)
		return error;

	/* We have got enough memory and from now on we cannot recover */
	restore_image();
	return 0;
}