^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/kernel/power/snapshot.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * This file provides system snapshot/restore functionality for swsusp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #define pr_fmt(fmt) "PM: hibernation: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/version.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/suspend.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/pm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/nmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/console.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/ktime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/set_memory.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include "power.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) static bool hibernate_restore_protection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) static bool hibernate_restore_protection_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) void enable_restore_image_protection(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) hibernate_restore_protection = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) static inline void hibernate_restore_protection_begin(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) hibernate_restore_protection_active = hibernate_restore_protection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) static inline void hibernate_restore_protection_end(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) hibernate_restore_protection_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) static inline void hibernate_restore_protect_page(void *page_address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) if (hibernate_restore_protection_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) set_memory_ro((unsigned long)page_address, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) static inline void hibernate_restore_unprotect_page(void *page_address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) if (hibernate_restore_protection_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) set_memory_rw((unsigned long)page_address, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static inline void hibernate_restore_protection_begin(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) static inline void hibernate_restore_protection_end(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) static inline void hibernate_restore_protect_page(void *page_address) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) static inline void hibernate_restore_unprotect_page(void *page_address) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) static int swsusp_page_is_free(struct page *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) static void swsusp_set_page_forbidden(struct page *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) static void swsusp_unset_page_forbidden(struct page *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * Number of bytes to reserve for memory allocations made by device drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * cause image creation to fail (tunable via /sys/power/reserved_size).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) unsigned long reserved_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) void __init hibernate_reserved_size_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) reserved_size = SPARE_PAGES * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * Preferred image size in bytes (tunable via /sys/power/image_size).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * When it is set to N, swsusp will do its best to ensure the image
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * size will not exceed N bytes, but if that is impossible, it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * try to create the smallest image possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) unsigned long image_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) void __init hibernate_image_size_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) {
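/* Prefer an image of at most 2/5 of total RAM, e.g. roughly 3.2 GB on an 8 GB system. */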
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * List of PBEs needed for restoring the pages that were allocated before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * the suspend and included in the suspend image, but have also been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * allocated by the "resume" kernel, so their contents cannot be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * directly to their "original" page frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) struct pbe *restore_pblist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) /* struct linked_page is used to build chains of pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) struct linked_page {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) struct linked_page *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) char data[LINKED_PAGE_DATA_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) } __packed;
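/*
 * For example, with 4 KiB pages and 8-byte pointers each linked_page
 * provides LINKED_PAGE_DATA_SIZE == 4096 - 8 == 4088 bytes of payload.
 */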
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * List of "safe" pages (i.e. pages that were not used by the image kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * before hibernation) that may be used as temporary storage for image kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * memory contents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) static struct linked_page *safe_pages_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /* Pointer to an auxiliary buffer (1 page) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) static void *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #define PG_ANY 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define PG_SAFE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define PG_UNSAFE_CLEAR 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define PG_UNSAFE_KEEP 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) static unsigned int allocated_unsafe_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * get_image_page - Allocate a page for a hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * @gfp_mask: GFP mask for the allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * @safe_needed: Get pages that were not used before hibernation (restore only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * During image restoration, for storing the PBE list and the image data, we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * only use memory pages that do not conflict with the pages used before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * using allocated_unsafe_pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * Each allocated image page is marked as PageNosave and PageNosaveFree so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * swsusp_free() can release it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) static void *get_image_page(gfp_t gfp_mask, int safe_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) void *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) res = (void *)get_zeroed_page(gfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) if (safe_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) while (res && swsusp_page_is_free(virt_to_page(res))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) /* The page is unsafe, mark it for swsusp_free() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) swsusp_set_page_forbidden(virt_to_page(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) allocated_unsafe_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) res = (void *)get_zeroed_page(gfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) swsusp_set_page_forbidden(virt_to_page(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) swsusp_set_page_free(virt_to_page(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) static void *__get_safe_page(gfp_t gfp_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) if (safe_pages_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) void *ret = safe_pages_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) safe_pages_list = safe_pages_list->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) memset(ret, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) return get_image_page(gfp_mask, PG_SAFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) unsigned long get_safe_page(gfp_t gfp_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) return (unsigned long)__get_safe_page(gfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) static struct page *alloc_image_page(gfp_t gfp_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) page = alloc_page(gfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) swsusp_set_page_forbidden(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) swsusp_set_page_free(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) static void recycle_safe_page(void *page_address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) struct linked_page *lp = page_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) lp->next = safe_pages_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) safe_pages_list = lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * free_image_page - Free a page allocated for hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * @addr: Address of the page to free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * The page to free should have been allocated by get_image_page() (page flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * set by it are affected).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) static inline void free_image_page(void *addr, int clear_nosave_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) BUG_ON(!virt_addr_valid(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) page = virt_to_page(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) swsusp_unset_page_forbidden(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) if (clear_nosave_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) swsusp_unset_page_free(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) static inline void free_list_of_pages(struct linked_page *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) int clear_page_nosave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) while (list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) struct linked_page *lp = list->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) free_image_page(list, clear_page_nosave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) list = lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) * struct chain_allocator is used for allocating small objects out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) * a linked list of pages called 'the chain'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * The chain grows whenever there is no room for a new object in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * the current page. The allocated objects cannot be freed individually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) * It is only possible to free them all at once, by freeing the entire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) * chain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * NOTE: The chain allocator may be inefficient if the allocated objects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * are not much smaller than PAGE_SIZE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) struct chain_allocator {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) struct linked_page *chain; /* the chain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) unsigned int used_space; /* total size of objects allocated out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) of the current page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) gfp_t gfp_mask; /* mask for allocating pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) int safe_needed; /* if set, only "safe" pages are allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) int safe_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) ca->chain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) ca->used_space = LINKED_PAGE_DATA_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) ca->gfp_mask = gfp_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) ca->safe_needed = safe_needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) void *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) struct linked_page *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) get_image_page(ca->gfp_mask, PG_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) if (!lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) lp->next = ca->chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) ca->chain = lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) ca->used_space = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) ret = ca->chain->data + ca->used_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) ca->used_space += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) }
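/*
 * Minimal usage sketch (illustrative only, not part of the original code):
 * objects are carved out of the chain one after another and can only be
 * released all at once by freeing the chain itself, e.g.:
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 */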
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * Data types related to memory bitmaps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * Memory bitmap is a structure consisting of many linked lists of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * objects. The main list's elements are of type struct zone_bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) * and each of them corresponds to one zone. For each zone bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * object there is a list of objects of type struct bm_block that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * represent each block of the bitmap in which information is stored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * struct memory_bitmap contains a pointer to the main list of zone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * bitmap objects, a struct bm_position used for browsing the bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * and a pointer to the list of pages used for allocating all of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * zone bitmap objects and bitmap block objects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * NOTE: It has to be possible to lay out the bitmap in memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * using only allocations of order 0. Additionally, the bitmap is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * designed to work with an arbitrary number of zones (this is over the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * top for now, but let's avoid making unnecessary assumptions ;-).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * struct zone_bitmap contains a pointer to a list of bitmap block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * objects and a pointer to the bitmap block object that has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) * most recently used for setting bits. Additionally, it contains the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) * PFNs that correspond to the start and end of the represented zone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) * struct bm_block contains a pointer to the memory page in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) * information is stored (in the form of a block of the bitmap).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) * It also contains the pfns that correspond to the start and end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) * the represented memory area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) * The memory bitmap is organized as a radix tree to guarantee fast random
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) * access to the bits. There is one radix tree for each zone (as returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) * from create_mem_extents).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) * One radix tree is represented by one struct mem_zone_bm_rtree. There are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * two linked lists for the nodes of the tree, one for the inner nodes and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * one for the leaf nodes. The linked leaf nodes are used for fast linear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) * access of the memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * The struct rtree_node represents one node of the radix tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) #define BM_END_OF_MAP (~0UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) #define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) #define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) #define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
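/*
 * For example, with 4 KiB pages one bitmap block is a full page of bits:
 * BM_BITS_PER_BLOCK == 4096 * 8 == 32768, BM_BLOCK_SHIFT == 12 + 3 == 15,
 * so a single block covers 32768 PFNs (128 MiB of memory).
 */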
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) * struct rtree_node is a wrapper struct to link the nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) * of the rtree together for easy linear iteration over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) * bits and easy freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) struct rtree_node {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) unsigned long *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * struct mem_zone_bm_rtree represents a bitmap used for one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * populated memory zone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) struct mem_zone_bm_rtree {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) struct list_head list; /* Link Zones together */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) struct list_head nodes; /* Radix Tree inner nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) struct list_head leaves; /* Radix Tree leaves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) unsigned long start_pfn; /* Zone start page frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) unsigned long end_pfn; /* Zone end page frame + 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) struct rtree_node *rtree; /* Radix Tree Root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) int levels; /* Number of Radix Tree Levels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) unsigned int blocks; /* Number of Bitmap Blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) /* struct bm_position is used for browsing memory bitmaps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) struct bm_position {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) struct mem_zone_bm_rtree *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) struct rtree_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) unsigned long node_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) int node_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) struct memory_bitmap {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) struct list_head zones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) struct linked_page *p_list; /* list of pages used to store zone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) bitmap objects and bitmap block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) objects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) struct bm_position cur; /* most recently used bit position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) /* Functions that operate on memory bitmaps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) #define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) #if BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) #define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
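/*
 * For example, on a 64-bit machine with 4 KiB pages each radix-tree node is
 * one page holding BM_ENTRIES_PER_LEVEL == 4096 / 8 == 512 slots, so
 * BM_RTREE_LEVEL_SHIFT == 9 and every additional level multiplies the
 * reachable number of blocks by 512.
 */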
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * alloc_rtree_node - Allocate a new node and add it to the radix tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * This function is used to allocate inner nodes as well as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * leaf nodes of the radix tree. It also adds the node to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * corresponding linked list passed in by the *list parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) struct chain_allocator *ca,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) struct rtree_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) node = chain_alloc(ca, sizeof(struct rtree_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) node->data = get_image_page(gfp_mask, safe_needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) if (!node->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) list_add_tail(&node->list, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) * add_rtree_block - Add a new leaf node to the radix tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) * The leaf nodes need to be allocated in ascending block order to keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * the linked list of leaves sorted. This is guaranteed by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * zone->blocks counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) int safe_needed, struct chain_allocator *ca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) struct rtree_node *node, *block, **dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) unsigned int levels_needed, block_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) block_nr = zone->blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) levels_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) /* How many levels do we need for this block nr? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) while (block_nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) levels_needed += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) block_nr >>= BM_RTREE_LEVEL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
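/*
 * For example, with BM_RTREE_LEVEL_SHIFT == 9: block 0 needs no inner
 * nodes (the leaf itself becomes the root), blocks 1..511 need one
 * level and blocks 512..262143 need two.
 */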
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) /* Make sure the rtree has enough levels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) for (i = zone->levels; i < levels_needed; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) node = alloc_rtree_node(gfp_mask, safe_needed, ca,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) &zone->nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) node->data[0] = (unsigned long)zone->rtree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) zone->rtree = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) zone->levels += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) /* Allocate new block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (!block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) /* Now walk the rtree to insert the block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) node = zone->rtree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) dst = &zone->rtree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) block_nr = zone->blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) for (i = zone->levels; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (!node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) node = alloc_rtree_node(gfp_mask, safe_needed, ca,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) &zone->nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) *dst = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) index &= BM_RTREE_LEVEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) dst = (struct rtree_node **)&((*dst)->data[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) node = *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) zone->blocks += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) *dst = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) int clear_nosave_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) * create_zone_bm_rtree - Create a radix tree for one zone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) * Allocates the mem_zone_bm_rtree structure and initializes it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) * This function also allocates and builds the radix tree for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) * zone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) int safe_needed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) struct chain_allocator *ca,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) struct mem_zone_bm_rtree *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) unsigned int i, nr_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) unsigned long pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) pages = end - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) if (!zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) INIT_LIST_HEAD(&zone->nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) INIT_LIST_HEAD(&zone->leaves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) zone->start_pfn = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) zone->end_pfn = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) for (i = 0; i < nr_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) return zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) * free_zone_bm_rtree - Free the memory of the radix tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * Free all node pages of the radix tree. The mem_zone_bm_rtree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * structure itself is not freed here nor are the rtree_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * structs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) int clear_nosave_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) struct rtree_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) list_for_each_entry(node, &zone->nodes, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) free_image_page(node->data, clear_nosave_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) list_for_each_entry(node, &zone->leaves, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) free_image_page(node->data, clear_nosave_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) static void memory_bm_position_reset(struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) bm->cur.node = list_entry(bm->cur.zone->leaves.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) struct rtree_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) bm->cur.node_pfn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) bm->cur.node_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) struct mem_extent {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) struct list_head hook;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) unsigned long start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) unsigned long end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) * free_mem_extents - Free a list of memory extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) * @list: List of extents to free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) static void free_mem_extents(struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) struct mem_extent *ext, *aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) list_for_each_entry_safe(ext, aux, list, hook) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) list_del(&ext->hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) kfree(ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) * create_mem_extents - Create a list of memory extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) * @list: List to put the extents into.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) * @gfp_mask: Mask to use for memory allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) * The extents represent contiguous ranges of PFNs.
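*
* For example, if one populated zone spans PFNs [0, 0x1000) and another
* spans [0x800, 0x2000), the resulting list holds a single extent
* covering [0, 0x2000).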
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) struct zone *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) INIT_LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) for_each_populated_zone(zone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) unsigned long zone_start, zone_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) struct mem_extent *ext, *cur, *aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) zone_start = zone->zone_start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) zone_end = zone_end_pfn(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) list_for_each_entry(ext, list, hook)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (zone_start <= ext->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) if (&ext->hook == list || zone_end < ext->start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /* New extent is necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) struct mem_extent *new_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) if (!new_ext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) free_mem_extents(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) new_ext->start = zone_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) new_ext->end = zone_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) list_add_tail(&new_ext->hook, &ext->hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) /* Merge this zone's range of PFNs with the existing one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (zone_start < ext->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) ext->start = zone_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (zone_end > ext->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) ext->end = zone_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) /* More merging may be possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) cur = ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) list_for_each_entry_safe_continue(cur, aux, list, hook) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) if (zone_end < cur->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (zone_end < cur->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) ext->end = cur->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) list_del(&cur->hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) kfree(cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * memory_bm_create - Allocate memory for a memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) int safe_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) struct chain_allocator ca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) struct list_head mem_extents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) struct mem_extent *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) chain_init(&ca, gfp_mask, safe_needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) INIT_LIST_HEAD(&bm->zones);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) error = create_mem_extents(&mem_extents, gfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) list_for_each_entry(ext, &mem_extents, hook) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) struct mem_zone_bm_rtree *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) ext->start, ext->end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (!zone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) goto Error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) list_add_tail(&zone->list, &bm->zones);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) bm->p_list = ca.chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) memory_bm_position_reset(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) Exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) free_mem_extents(&mem_extents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) Error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) bm->p_list = ca.chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) memory_bm_free(bm, PG_UNSAFE_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) goto Exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * memory_bm_free - Free memory occupied by the memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * @bm: Memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) struct mem_zone_bm_rtree *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) list_for_each_entry(zone, &bm->zones, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) free_zone_bm_rtree(zone, clear_nosave_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) free_list_of_pages(bm->p_list, clear_nosave_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) INIT_LIST_HEAD(&bm->zones);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) * Find the bit in memory bitmap @bm that corresponds to the given PFN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * Walk the radix tree to find the page containing the bit that represents @pfn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) * and return the position of the bit in @addr and @bit_nr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) void **addr, unsigned int *bit_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) struct mem_zone_bm_rtree *curr, *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) struct rtree_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) int i, block_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) zone = bm->cur.zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) goto zone_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) zone = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) /* Find the right zone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) list_for_each_entry(curr, &bm->zones, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) zone = curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (!zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) zone_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * We have found the zone. Now walk the radix tree to find the leaf node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * for our PFN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * If the zone we wish to scan is the current zone and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * pfn falls into the current node then we do not need to walk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) node = bm->cur.node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (zone == bm->cur.zone &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) goto node_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) node = zone->rtree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
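/*
 * For example, with BM_BLOCK_SHIFT == 15 a pfn offset of 0x12345 from
 * zone->start_pfn lands in block_nr == 2, and the bit offset within that
 * block (pfn offset & BM_BLOCK_MASK) ends up as 0x2345.
 */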
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) for (i = zone->levels; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) index &= BM_RTREE_LEVEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) BUG_ON(node->data[index] == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) node = (struct rtree_node *)node->data[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) node_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) /* Update last position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) bm->cur.zone = zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) bm->cur.node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /* Set return values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) *addr = node->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) unsigned int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) error = memory_bm_find_bit(bm, pfn, &addr, &bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) BUG_ON(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) set_bit(bit, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) unsigned int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) error = memory_bm_find_bit(bm, pfn, &addr, &bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) set_bit(bit, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) unsigned int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) error = memory_bm_find_bit(bm, pfn, &addr, &bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) BUG_ON(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) clear_bit(bit, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
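/*
 * Clear the bit for the PFN most recently returned by memory_bm_next_pfn()
 * (bm->cur.node_bit points one position past that bit).
 */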
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static void memory_bm_clear_current(struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) bit = max(bm->cur.node_bit - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) clear_bit(bit, bm->cur.node->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) unsigned int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) error = memory_bm_find_bit(bm, pfn, &addr, &bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) BUG_ON(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return test_bit(bit, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
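/*
 * Return 'true' if @pfn falls into one of the zones covered by @bm, i.e. if
 * the bitmap has a bit representing that page frame at all.
 */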
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) unsigned int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return !memory_bm_find_bit(bm, pfn, &addr, &bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * rtree_next_node - Jump to the next leaf node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * Set the position to the beginning of the next node in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * memory bitmap. This is either the next node in the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * zone's radix tree or the first node in the radix tree of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * next zone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * Return true if there is a next node, false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static bool rtree_next_node(struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) bm->cur.node = list_entry(bm->cur.node->list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct rtree_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) bm->cur.node_pfn += BM_BITS_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) bm->cur.node_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) touch_softlockup_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* No more nodes, go to the next zone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) bm->cur.zone = list_entry(bm->cur.zone->list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct mem_zone_bm_rtree, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) bm->cur.node = list_entry(bm->cur.zone->leaves.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct rtree_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) bm->cur.node_pfn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) bm->cur.node_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* No more zones */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * @bm: Memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Starting from the last returned position this function searches for the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * set bit in @bm and returns the PFN represented by it. If no more bits are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * set, BM_END_OF_MAP is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * It is required to run memory_bm_position_reset() before the first call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * this function for the given memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) unsigned long bits, pfn, pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) bit = find_next_bit(bm->cur.node->data, bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) bm->cur.node_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (bit < bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) bm->cur.node_bit = bit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) } while (rtree_next_node(bm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return BM_END_OF_MAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
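/*
 * Minimal usage sketch of the iterator above (illustrative only; it mirrors
 * the pattern used by clear_or_poison_free_pages() further down):
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something_with(pfn);		// hypothetical helper
 */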
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * This structure represents a range of page frames the contents of which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * should not be saved during hibernation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct nosave_region {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) unsigned long start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) unsigned long end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) static LIST_HEAD(nosave_regions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
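/*
 * Bitmap recycling: rather than being freed, the pages backing a recycled
 * bitmap are handed back via recycle_safe_page() to the pool of safe pages
 * that can be reused while the hibernation image is being restored.
 */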
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct rtree_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) list_for_each_entry(node, &zone->nodes, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) recycle_safe_page(node->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) list_for_each_entry(node, &zone->leaves, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) recycle_safe_page(node->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) static void memory_bm_recycle(struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct mem_zone_bm_rtree *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct linked_page *p_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) list_for_each_entry(zone, &bm->zones, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) recycle_zone_bm_rtree(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) p_list = bm->p_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) while (p_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct linked_page *lp = p_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) p_list = lp->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) recycle_safe_page(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * register_nosave_region - Register a region of unsaveable memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * Register a range of page frames the contents of which should not be saved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * during hibernation (to be used in the early initialization code).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct nosave_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (start_pfn >= end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (!list_empty(&nosave_regions)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* Try to extend the previous region (they should be sorted) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) region = list_entry(nosave_regions.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct nosave_region, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (region->end_pfn == start_pfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) region->end_pfn = end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) goto Report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* This allocation cannot fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) region = memblock_alloc(sizeof(struct nosave_region),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (!region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) sizeof(struct nosave_region));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) region->start_pfn = start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) region->end_pfn = end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) list_add_tail(&region->list, &nosave_regions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) Report:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) (unsigned long long) start_pfn << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * Set bits in this map correspond to the page frames the contents of which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * should not be saved during the suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) static struct memory_bitmap *forbidden_pages_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* Set bits in this map correspond to free page frames. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) static struct memory_bitmap *free_pages_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * Each page frame allocated for creating the image is marked by setting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) void swsusp_set_page_free(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (free_pages_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) memory_bm_set_bit(free_pages_map, page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static int swsusp_page_is_free(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return free_pages_map ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) void swsusp_unset_page_free(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (free_pages_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static void swsusp_set_page_forbidden(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (forbidden_pages_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int swsusp_page_is_forbidden(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return forbidden_pages_map ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static void swsusp_unset_page_forbidden(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (forbidden_pages_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * mark_nosave_pages - Mark pages that should not be saved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * @bm: Memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * Set the bits in @bm that correspond to the page frames the contents of which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * should not be saved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static void mark_nosave_pages(struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) struct nosave_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (list_empty(&nosave_regions))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) list_for_each_entry(region, &nosave_regions, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) (unsigned long long) region->start_pfn << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ((unsigned long long) region->end_pfn << PAGE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (pfn_valid(pfn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * It is safe to ignore the result of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * mem_bm_set_bit_check() here, since we won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * touch the PFNs for which the error is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * returned anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) mem_bm_set_bit_check(bm, pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Create bitmaps needed for marking page frames that should not be saved and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * free page frames. The forbidden_pages_map and free_pages_map pointers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * only modified if everything goes well, because we don't want the bits to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * touched before both bitmaps are set up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int create_basic_memory_bitmaps(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct memory_bitmap *bm1, *bm2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (forbidden_pages_map && free_pages_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) BUG_ON(forbidden_pages_map || free_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (!bm1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) goto Free_first_object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!bm2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) goto Free_first_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) goto Free_second_object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) forbidden_pages_map = bm1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) free_pages_map = bm2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) mark_nosave_pages(forbidden_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) pr_debug("Basic memory bitmaps created\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) Free_second_object:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) kfree(bm2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) Free_first_bitmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) memory_bm_free(bm1, PG_UNSAFE_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) Free_first_object:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) kfree(bm1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * auxiliary pointers are necessary so that the bitmaps themselves are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * referred to while they are being freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) void free_basic_memory_bitmaps(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct memory_bitmap *bm1, *bm2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) bm1 = forbidden_pages_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) bm2 = free_pages_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) forbidden_pages_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) free_pages_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) memory_bm_free(bm1, PG_UNSAFE_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) kfree(bm1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) memory_bm_free(bm2, PG_UNSAFE_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) kfree(bm2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) pr_debug("Basic memory bitmaps freed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
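/*
 * After the hibernation image has been restored, page frames that are free
 * again may still contain data written while the image was being loaded.
 * If page poisoning or init_on_free is enabled, re-apply the poison pattern
 * or clear such pages so that their usual guarantees keep holding.
 */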
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static void clear_or_poison_free_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (page_poisoning_enabled_static())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) __kernel_poison_pages(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) else if (want_init_on_free())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) clear_highpage(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) void clear_or_poison_free_pages(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct memory_bitmap *bm = free_pages_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (WARN_ON(!free_pages_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (page_poisoning_enabled() || want_init_on_free()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) memory_bm_position_reset(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) pfn = memory_bm_next_pfn(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) while (pfn != BM_END_OF_MAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (pfn_valid(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) clear_or_poison_free_page(pfn_to_page(pfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) pfn = memory_bm_next_pfn(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) memory_bm_position_reset(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) pr_info("free pages cleared after restore\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * snapshot_additional_pages - Estimate the number of extra pages needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * @zone: Memory zone to carry out the computation for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * Estimate the number of additional pages needed for setting up a hibernation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * image data structures for @zone (usually, the returned value is greater than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * the exact number).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) unsigned int snapshot_additional_pages(struct zone *zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) unsigned int rtree, nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) LINKED_PAGE_DATA_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) while (nodes > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) rtree += nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return 2 * rtree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
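/*
 * Rough worked example for the estimate above (illustrative; assumes 4 KiB
 * pages and a 64-bit build): a zone spanning 1 GiB has 262144 page frames,
 * so one bitmap needs 8 leaf pages (one per 32768 frames), plus roughly one
 * page of struct rtree_node bookkeeping and one page for the single inner
 * rtree level, i.e. about 10 pages. The result is doubled, presumably
 * because two such bitmaps (orig_bm and copy_bm) are set up for the image.
 */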
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) #ifdef CONFIG_HIGHMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * count_free_highmem_pages - Compute the total number of free highmem pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * The returned number is system-wide.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static unsigned int count_free_highmem_pages(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct zone *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) unsigned int cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) for_each_populated_zone(zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (is_highmem(zone))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) cnt += zone_page_state(zone, NR_FREE_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * saveable_highmem_page - Check if a highmem page is saveable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * Determine whether a highmem page should be included in a hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * We should save the page if it isn't Nosave, NosaveFree or Reserved, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * it isn't part of a free chunk of pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (!pfn_valid(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) page = pfn_to_online_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (!page || page_zone(page) != zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) BUG_ON(!PageHighMem(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (PageReserved(page) || PageOffline(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (page_is_guard(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * count_highmem_pages - Compute the total number of saveable highmem pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static unsigned int count_highmem_pages(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct zone *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) unsigned int n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) for_each_populated_zone(zone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) unsigned long pfn, max_zone_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (!is_highmem(zone))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) mark_free_pages(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) max_zone_pfn = zone_end_pfn(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (saveable_highmem_page(zone, pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) #endif /* CONFIG_HIGHMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * saveable_page - Check if the given page is saveable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * Determine whether a non-highmem page should be included in a hibernation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * We should save the page if it isn't Nosave, and is not in the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * of pages statically defined as 'unsaveable', and it isn't part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * a free chunk of pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) static struct page *saveable_page(struct zone *zone, unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (!pfn_valid(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) page = pfn_to_online_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (!page || page_zone(page) != zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) BUG_ON(PageHighMem(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (PageOffline(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (PageReserved(page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (page_is_guard(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * count_data_pages - Compute the total number of saveable non-highmem pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static unsigned int count_data_pages(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct zone *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) unsigned long pfn, max_zone_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) unsigned int n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) for_each_populated_zone(zone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (is_highmem(zone))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) mark_free_pages(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) max_zone_pfn = zone_end_pfn(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (saveable_page(zone, pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * This is needed because copy_page() and memcpy() are not usable for copying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * task structs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static inline void do_copy_page(long *dst, long *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) for (n = PAGE_SIZE / sizeof(long); n; n--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) *dst++ = *src++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * safe_copy_page - Copy a page in a safe way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * Check if the page we are going to copy is marked as present in the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * page tables. This is always the case if neither CONFIG_DEBUG_PAGEALLOC nor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * CONFIG_ARCH_HAS_SET_DIRECT_MAP is set, in which case kernel_page_present()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * always returns 'true'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) static void safe_copy_page(void *dst, struct page *s_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (kernel_page_present(s_page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) do_copy_page(dst, page_address(s_page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) kernel_map_pages(s_page, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) do_copy_page(dst, page_address(s_page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) kernel_map_pages(s_page, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) #ifdef CONFIG_HIGHMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return is_highmem(zone) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct page *s_page, *d_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) void *src, *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) s_page = pfn_to_page(src_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) d_page = pfn_to_page(dst_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (PageHighMem(s_page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) src = kmap_atomic(s_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) dst = kmap_atomic(d_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) do_copy_page(dst, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (PageHighMem(d_page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * The page pointed to by src may contain some kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * data modified by kmap_atomic()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) safe_copy_page(buffer, s_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) dst = kmap_atomic(d_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) copy_page(dst, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) safe_copy_page(page_address(d_page), s_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) #define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) safe_copy_page(page_address(pfn_to_page(dst_pfn)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) pfn_to_page(src_pfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) #endif /* CONFIG_HIGHMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
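/*
 * copy_data_pages - Copy all saveable pages into the preallocated image pages.
 *
 * First pass: walk every populated zone and set, in @orig_bm, the bit of each
 * page frame that page_is_saveable() says must be saved. Second pass: walk
 * @orig_bm and @copy_bm in lockstep and copy every saveable page into the
 * next preallocated page frame recorded in @copy_bm.
 */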
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static void copy_data_pages(struct memory_bitmap *copy_bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct memory_bitmap *orig_bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) struct zone *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) for_each_populated_zone(zone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) unsigned long max_zone_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) mark_free_pages(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) max_zone_pfn = zone_end_pfn(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (page_is_saveable(zone, pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) memory_bm_set_bit(orig_bm, pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) memory_bm_position_reset(orig_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) memory_bm_position_reset(copy_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) pfn = memory_bm_next_pfn(orig_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (unlikely(pfn == BM_END_OF_MAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /* Total number of image pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static unsigned int nr_copy_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* Number of pages needed for saving the original pfns of the image pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) static unsigned int nr_meta_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * Numbers of normal and highmem page frames allocated for hibernation image
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * before suspending devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) static unsigned int alloc_normal, alloc_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * Memory bitmap used for marking saveable pages (during hibernation) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * hibernation image pages (during restore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) static struct memory_bitmap orig_bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * Memory bitmap used during hibernation for marking allocated page frames that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * will contain copies of saveable pages. During restore it is initially used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * for marking hibernation image pages, but then the set bits from it are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * duplicated in @orig_bm and it is released. On highmem systems it is next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * used for marking "safe" highmem pages, but it has to be reinitialized for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * this purpose.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) static struct memory_bitmap copy_bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * swsusp_free - Free pages allocated for hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * Image pages are allocated before snapshot creation, so they need to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) * released after resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) void swsusp_free(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) unsigned long fb_pfn, fr_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (!forbidden_pages_map || !free_pages_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) memory_bm_position_reset(forbidden_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) memory_bm_position_reset(free_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) fr_pfn = memory_bm_next_pfn(free_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) * Find the next bit set in both bitmaps. This is guaranteed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (fb_pfn < fr_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (fr_pfn < fb_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) fr_pfn = memory_bm_next_pfn(free_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) } while (fb_pfn != fr_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct page *page = pfn_to_page(fr_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) memory_bm_clear_current(forbidden_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) memory_bm_clear_current(free_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) hibernate_restore_unprotect_page(page_address(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) goto loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) nr_copy_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) nr_meta_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) restore_pblist = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) alloc_normal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) alloc_highmem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) hibernate_restore_protection_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /* Helper functions used for the shrinking of memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) #define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * preallocate_image_pages - Allocate a number of pages for hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * @nr_pages: Number of page frames to allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * @mask: GFP flags to use for the allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * Return value: Number of page frames actually allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) unsigned long nr_alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) while (nr_pages > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) page = alloc_image_page(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) memory_bm_set_bit(&copy_bm, page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (PageHighMem(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) alloc_highmem++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) alloc_normal++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) nr_pages--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) nr_alloc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return nr_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
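/*
 * Preallocate up to @nr_pages page frames from normal (lowmem) memory, but
 * never more than @avail_normal minus the number of normal pages that have
 * already been allocated (alloc_normal).
 */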
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) static unsigned long preallocate_image_memory(unsigned long nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) unsigned long avail_normal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) unsigned long alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (avail_normal <= alloc_normal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) alloc = avail_normal - alloc_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (nr_pages < alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) alloc = nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return preallocate_image_pages(alloc, GFP_IMAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) #ifdef CONFIG_HIGHMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static unsigned long preallocate_image_highmem(unsigned long nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) * __fraction - Compute (an approximation of) x * (multiplier / base).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) return div64_u64(x * multiplier, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
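/*
 * Preallocate from highmem the share of @nr_pages that corresponds to the
 * highmem fraction of memory, i.e. roughly nr_pages * highmem / total.
 */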
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) unsigned long highmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) unsigned long total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) unsigned long alloc = __fraction(nr_pages, highmem, total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) #else /* CONFIG_HIGHMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) unsigned long highmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) unsigned long total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) #endif /* CONFIG_HIGHMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * free_unnecessary_pages - Release preallocated pages not needed for the image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) static unsigned long free_unnecessary_pages(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) unsigned long save, to_free_normal, to_free_highmem, free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) save = count_data_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (alloc_normal >= save) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) to_free_normal = alloc_normal - save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) save = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) to_free_normal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) save -= alloc_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) save += count_highmem_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (alloc_highmem >= save) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) to_free_highmem = alloc_highmem - save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) to_free_highmem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) save -= alloc_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (to_free_normal > save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) to_free_normal -= save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) to_free_normal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
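/*
 * At this point, to_free_normal and to_free_highmem are the numbers of
 * preallocated normal and highmem pages that can be released while still
 * leaving enough page frames to hold a copy of every saveable page; any
 * highmem shortfall has already been charged against to_free_normal above.
 */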
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) free = to_free_normal + to_free_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) memory_bm_position_reset(&copy_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) while (to_free_normal > 0 || to_free_highmem > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) unsigned long pfn = memory_bm_next_pfn(&copy_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) struct page *page = pfn_to_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (PageHighMem(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (!to_free_highmem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) to_free_highmem--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) alloc_highmem--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (!to_free_normal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) to_free_normal--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) alloc_normal--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) memory_bm_clear_bit(&copy_bm, pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) swsusp_unset_page_forbidden(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) swsusp_unset_page_free(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) return free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * minimum_image_size - Estimate the minimum acceptable size of an image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * @saveable: Number of saveable pages in the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * We do not want to try too hard to free memory, so estimate the minimum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * acceptable size of a hibernation image and use it as the lower limit for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * preallocating memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * We assume that the minimum image size should be proportional to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * [number of saveable pages] - [number of pages that can be freed in theory]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * where the second term is the sum of (1) reclaimable slab pages, (2) active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) static unsigned long minimum_image_size(unsigned long saveable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
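/*
 * NR_SLAB_RECLAIMABLE_B is accounted in bytes, hence the _pages helper;
 * the remaining vmstat counters are already kept in pages.
 */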
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) + global_node_page_state(NR_ACTIVE_ANON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) + global_node_page_state(NR_INACTIVE_ANON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) + global_node_page_state(NR_ACTIVE_FILE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) + global_node_page_state(NR_INACTIVE_FILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) return saveable <= size ? 0 : saveable - size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) * hibernate_preallocate_memory - Preallocate memory for hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) * To create a hibernation image it is necessary to make a copy of every page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * frame in use. We also need a number of page frames to be free during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * hibernation for allocations made while saving the image and for device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) * drivers, in case they need to allocate memory from their hibernation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * /sys/power/reserved_size), respectively). To make this happen, we compute the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) * total number of available page frames and allocate at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * of them, which corresponds to the maximum size of a hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * If image_size is set below the number following from the above formula,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) * the preallocation of memory is continued until the total number of saveable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) * pages in the system is below the requested image size or the minimum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * acceptable image size returned by minimum_image_size(), whichever is greater.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) int hibernate_preallocate_memory(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) struct zone *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) unsigned long saveable, size, max_size, count, highmem, pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) unsigned long alloc, save_highmem, pages_highmem, avail_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) ktime_t start, stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) pr_info("Preallocating image memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) start = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) pr_err("Cannot allocate original bitmap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) pr_err("Cannot allocate copy bitmap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) alloc_normal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) alloc_highmem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) /* Count the number of saveable data pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) save_highmem = count_highmem_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) saveable = count_data_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * Compute the total number of page frames we can use (count) and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * number of pages needed for image metadata (size).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) count = saveable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) saveable += save_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) highmem = save_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) for_each_populated_zone(zone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) size += snapshot_additional_pages(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (is_highmem(zone))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) highmem += zone_page_state(zone, NR_FREE_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) count += zone_page_state(zone, NR_FREE_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) avail_normal = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) count += highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) count -= totalreserve_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) /* Compute the maximum number of saveable pages to leave in memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) max_size = (count - (size + PAGES_FOR_IO)) / 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
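/*
 * Illustrative numbers (not from any particular system): with
 * count = 1000000 usable page frames, size = 2000 metadata pages,
 * PAGES_FOR_IO = 1024, reserved_size = 1 MB and 4 KB pages, this is
 * (1000000 - 3024) / 2 - 2 * 256 = 497976 pages, i.e. roughly half of
 * the usable page frames may be left in memory.
 */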
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) /* Compute the desired number of image pages specified by image_size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) size = DIV_ROUND_UP(image_size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (size > max_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) size = max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) * If the desired number of image pages is at least as large as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) * current number of saveable pages in memory, allocate page frames for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * the image and we're done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (size >= saveable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) pages = preallocate_image_highmem(save_highmem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) pages += preallocate_image_memory(saveable - pages, avail_normal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) /* Estimate the minimum size of the image. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) pages = minimum_image_size(saveable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) * To avoid excessive pressure on the normal zone, leave room in it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) * accommodate an image of the minimum size (unless it's already too
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * small, in which case don't preallocate pages from it at all).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (avail_normal > pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) avail_normal -= pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) avail_normal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (size < pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) size = min_t(unsigned long, pages, max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * Let the memory management subsystem know that we're going to need a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * large number of page frames to allocate and make it free some memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * NOTE: If this is not done, performance will be hurt badly in some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * test cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) shrink_all_memory(saveable - size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) * The number of saveable pages in memory was too high, so apply some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) * pressure to decrease it. First, make room for the largest possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * image and fail if that doesn't work. Next, try to decrease the size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * of the image as much as indicated by 'size' using allocations from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * highmem and non-highmem zones separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) pages_highmem = preallocate_image_highmem(highmem / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) alloc = count - max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (alloc > pages_highmem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) alloc -= pages_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) pages = preallocate_image_memory(alloc, avail_normal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (pages < alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /* We have exhausted non-highmem pages, try highmem. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) alloc -= pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) pages += pages_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) pages_highmem = preallocate_image_highmem(alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (pages_highmem < alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) pr_err("Image allocation is %lu pages short\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) alloc - pages_highmem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) pages += pages_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) * size is the desired number of saveable pages to leave in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * memory, so try to preallocate (all memory - size) pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) alloc = (count - pages) - size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) pages += preallocate_image_highmem(alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) * There are approximately max_size saveable pages at this point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) * and we want to reduce this number down to size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) alloc = max_size - size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) size = preallocate_highmem_fraction(alloc, highmem, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) pages_highmem += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) alloc -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) size = preallocate_image_memory(alloc, avail_normal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) pages_highmem += preallocate_image_highmem(alloc - size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) pages += pages_highmem + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * We only need as many page frames for the image as there are saveable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * pages in memory, but we have allocated more. Release the excess
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * ones now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) pages -= free_unnecessary_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) stop = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) pr_info("Allocated %lu pages for snapshot\n", pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) swsusp_show_speed(start, stop, pages, "Allocated");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) swsusp_free();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) #ifdef CONFIG_HIGHMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * Compute the number of non-highmem pages that will be necessary for creating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * copies of highmem pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (free_highmem >= nr_highmem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) nr_highmem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) nr_highmem -= free_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
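/*
 * nr_highmem is now the number of highmem pages whose copies will not fit
 * in highmem and therefore need normal page frames.
 */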
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) return nr_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) #endif /* CONFIG_HIGHMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * enough_free_mem - Check if there is enough free memory for the image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) struct zone *zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) unsigned int free = alloc_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) for_each_populated_zone(zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (!is_highmem(zone))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) free += zone_page_state(zone, NR_FREE_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) nr_pages += count_pages_for_highmem(nr_highmem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) nr_pages, PAGES_FOR_IO, free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return free > nr_pages + PAGES_FOR_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) #ifdef CONFIG_HIGHMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) * get_highmem_buffer - Allocate a buffer for highmem pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) * If there are some highmem pages in the hibernation image, we may need a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) * buffer to copy them and/or load their data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) static inline int get_highmem_buffer(int safe_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) buffer = get_image_page(GFP_ATOMIC, safe_needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return buffer ? 0 : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) * alloc_highmem_image_pages - Allocate some highmem pages for the image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) * Try to allocate as many pages as needed, but if the number of free highmem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) * pages is less than that, allocate them all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) unsigned int nr_highmem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) unsigned int to_alloc = count_free_highmem_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (to_alloc > nr_highmem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) to_alloc = nr_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) nr_highmem -= to_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) while (to_alloc-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) memory_bm_set_bit(bm, page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return nr_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) static inline int get_highmem_buffer(int safe_needed) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) unsigned int n) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) #endif /* CONFIG_HIGHMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * swsusp_alloc - Allocate memory for hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * We first try to allocate as many highmem pages as there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * saveable highmem pages in the system. If that fails, we allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) * non-highmem pages for the copies of the remaining highmem ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) * In this approach it is likely that the copies of highmem pages will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * also be located in high memory, because of the way in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * copy_data_pages() works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) static int swsusp_alloc(struct memory_bitmap *copy_bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) unsigned int nr_pages, unsigned int nr_highmem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (nr_highmem > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (get_highmem_buffer(PG_ANY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (nr_highmem > alloc_highmem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) nr_highmem -= alloc_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (nr_pages > alloc_normal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) nr_pages -= alloc_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) while (nr_pages-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) page = alloc_image_page(GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) memory_bm_set_bit(copy_bm, page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) swsusp_free();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) asmlinkage __visible int swsusp_save(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) unsigned int nr_pages, nr_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) pr_info("Creating image:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) drain_local_pages(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) nr_pages = count_data_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) nr_highmem = count_highmem_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (!enough_free_mem(nr_pages, nr_highmem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) pr_err("Not enough free memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) pr_err("Memory allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * While allocating the suspend pagedir, new cold pages may appear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) * Kill them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) drain_local_pages(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) copy_data_pages(&copy_bm, &orig_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) * End of critical section. From now on, we can write to memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * but we should not touch disk. This especially means we must _not_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) * touch swap space! Except we must write out our image of course.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) nr_pages += nr_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) nr_copy_pages = nr_pages;
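/*
 * Each metadata slot holds one PFN, so with 4 KB pages and 8-byte longs a
 * metadata page covers 512 image pages; e.g. an image of 100000 pages
 * needs 196 metadata pages.
 */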
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) pr_info("Image created (%u pages copied)\n", nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) #ifndef CONFIG_ARCH_HIBERNATION_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) static int init_header_complete(struct swsusp_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) info->version_code = LINUX_VERSION_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) static const char *check_image_kernel(struct swsusp_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (info->version_code != LINUX_VERSION_CODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) return "kernel version";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (strcmp(info->uts.sysname, init_utsname()->sysname))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return "system type";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (strcmp(info->uts.release, init_utsname()->release))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) return "kernel release";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (strcmp(info->uts.version, init_utsname()->version))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) return "version";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) if (strcmp(info->uts.machine, init_utsname()->machine))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) return "machine";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) unsigned long snapshot_get_image_size(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) {
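/* Data pages plus metadata pages plus one page for the image header. */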
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) return nr_copy_pages + nr_meta_pages + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) static int init_header(struct swsusp_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) memset(info, 0, sizeof(struct swsusp_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) info->num_physpages = get_num_physpages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) info->image_pages = nr_copy_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) info->pages = snapshot_get_image_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) info->size = info->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) info->size <<= PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) return init_header_complete(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) * pack_pfns - Prepare PFNs for saving.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) * @bm: Memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) * @buf: Memory buffer to store the PFNs in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) * PFNs corresponding to set bits in @bm are stored in the area of memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) * pointed to by @buf (1 page at a time).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
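/*
 * Each metadata page holds PAGE_SIZE / sizeof(long) PFNs; if the bitmap
 * runs out before the page is full, the page is terminated with
 * BM_END_OF_MAP.
 */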
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) buf[j] = memory_bm_next_pfn(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (unlikely(buf[j] == BM_END_OF_MAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) * snapshot_read_next - Get the address to read the next image page from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * @handle: Snapshot handle to be used for the reading.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * On the first call, @handle should point to a zeroed snapshot_handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * structure, which is then populated. A pointer to the same structure should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * be passed to this function on every subsequent call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) * On success, the function returns a positive number. Then, the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) * is allowed to read up to the returned number of bytes from the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) * location computed by the data_of() macro.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) * The function returns 0 to indicate the end of the data stream condition,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) * and negative numbers are returned on errors. If that happens, the structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) * pointed to by @handle is not updated and should not be used any more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) int snapshot_read_next(struct snapshot_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (handle->cur > nr_meta_pages + nr_copy_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (!buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) /* This causes the buffer to be freed by swsusp_free() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) buffer = get_image_page(GFP_ATOMIC, PG_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (!handle->cur) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) error = init_header((struct swsusp_info *)buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) handle->buffer = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) memory_bm_position_reset(&orig_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) memory_bm_position_reset(&copy_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) } else if (handle->cur <= nr_meta_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) clear_page(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) pack_pfns(buffer, &orig_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) if (PageHighMem(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * Highmem pages are copied to the buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * because we can't return with a kmapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * highmem page (we may not be called again).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) copy_page(buffer, kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) handle->buffer = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) handle->buffer = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) handle->cur++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) return PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
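/*
 * Illustrative use of snapshot_read_next() (a simplified sketch, not code
 * from this file; write_one_page() is a hypothetical output routine): the
 * image writer calls it in a loop and consumes up to the returned number
 * of bytes from data_of(*handle) each time.
 *
 *	static int save_image_sketch(struct snapshot_handle *handle)
 *	{
 *		int ret;
 *
 *		while ((ret = snapshot_read_next(handle)) > 0) {
 *			ret = write_one_page(data_of(*handle), ret);
 *			if (ret)
 *				return ret;
 *		}
 *		return ret;
 *	}
 *
 * The final return value is 0 at the end of the data stream and negative
 * on error, matching the contract described above.
 */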
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) static void duplicate_memory_bitmap(struct memory_bitmap *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct memory_bitmap *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) memory_bm_position_reset(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) pfn = memory_bm_next_pfn(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) while (pfn != BM_END_OF_MAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) memory_bm_set_bit(dst, pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) pfn = memory_bm_next_pfn(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * mark_unsafe_pages - Mark pages that were used before hibernation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * Mark the pages that cannot be used for storing the image during restoration,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * because they conflict with the pages that had been used before hibernation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) static void mark_unsafe_pages(struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) /* Clear the "free"/"unsafe" bit for all PFNs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) memory_bm_position_reset(free_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) pfn = memory_bm_next_pfn(free_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) while (pfn != BM_END_OF_MAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) memory_bm_clear_current(free_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) pfn = memory_bm_next_pfn(free_pages_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) /* Mark pages that correspond to the "original" PFNs as "unsafe" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) duplicate_memory_bitmap(free_pages_map, bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) allocated_unsafe_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) static int check_header(struct swsusp_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) const char *reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) reason = check_image_kernel(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (!reason && info->num_physpages != get_num_physpages())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) reason = "memory size";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (reason) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) pr_err("Image mismatch: %s\n", reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) * load_header - Check the image header and copy the data from it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) static int load_header(struct swsusp_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) restore_pblist = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) error = check_header(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) nr_copy_pages = info->image_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) nr_meta_pages = info->pages - info->image_pages - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) * @bm: Memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) * @buf: Area of memory containing the PFNs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) * For each element of the array pointed to by @buf (1 page at a time), set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) * corresponding bit in @bm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
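/*
 * The layout mirrors pack_pfns(): up to PAGE_SIZE / sizeof(long) PFNs per
 * page, terminated early by BM_END_OF_MAP. A PFN that is not valid or not
 * present in @bm indicates a corrupted or mismatched image.
 */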
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (unlikely(buf[j] == BM_END_OF_MAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) memory_bm_set_bit(bm, buf[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) #ifdef CONFIG_HIGHMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * struct highmem_pbe is used for creating the list of highmem pages that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * should be restored atomically during the resume from disk, because the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * frames they occupied before the suspend are now in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) struct highmem_pbe {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) struct page *copy_page; /* data is here now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) struct page *orig_page; /* data was here before the suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) struct highmem_pbe *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * List of highmem PBEs needed for restoring the highmem pages that were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * allocated before the suspend and included in the suspend image, but have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) * also been allocated by the "resume" kernel, so their contents cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) * written directly to their "original" page frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) static struct highmem_pbe *highmem_pblist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) * count_highmem_image_pages - Compute the number of highmem pages in the image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * @bm: Memory bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) * The bits in @bm that correspond to image pages are assumed to be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) unsigned int cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) memory_bm_position_reset(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) pfn = memory_bm_next_pfn(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) while (pfn != BM_END_OF_MAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (PageHighMem(pfn_to_page(pfn)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) pfn = memory_bm_next_pfn(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) static unsigned int safe_highmem_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) static struct memory_bitmap *safe_highmem_bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) * prepare_highmem_image - Allocate memory for loading highmem data from image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * @bm: Pointer to an uninitialized memory bitmap structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) * @nr_highmem_p: Pointer to the number of highmem image pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) * Try to allocate as many highmem pages as there are highmem image pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) * (@nr_highmem_p points to the variable containing the number of highmem image
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) * pages). The pages that are "safe" (i.e. will not be overwritten when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) * hibernation image is restored entirely) have the corresponding bits set in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) * @bm (it must be uninitialized).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) * NOTE: This function should not be called if there are no highmem image pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) static int prepare_highmem_image(struct memory_bitmap *bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) unsigned int *nr_highmem_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) unsigned int to_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (get_highmem_buffer(PG_SAFE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) to_alloc = count_free_highmem_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (to_alloc > *nr_highmem_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) to_alloc = *nr_highmem_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) *nr_highmem_p = to_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) safe_highmem_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) while (to_alloc-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) page = alloc_page(__GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) if (!swsusp_page_is_free(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) /* The page is "safe", set its bit in the bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) memory_bm_set_bit(bm, page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) safe_highmem_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) /* Mark the page as allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) swsusp_set_page_forbidden(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) swsusp_set_page_free(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) memory_bm_position_reset(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) safe_highmem_bm = bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) static struct page *last_highmem_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) * For a given highmem image page get a buffer that suspend_write_next() should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * return to its caller to write to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) * If the page is to be saved to its "original" page frame or a copy of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) * the page is to be made in the highmem, @buffer is returned. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) * the copy of the page is to be made in normal memory, so the address of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) * the copy is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) * If @buffer is returned, the caller of suspend_write_next() will write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) * the page's contents to @buffer, so they will have to be copied to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) * right location on the next call to suspend_write_next() and it is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) * with the help of copy_last_highmem_page(). For this purpose, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) * @buffer is returned, @last_highmem_page is set to the page to which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) * the data will have to be copied from @buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) static void *get_highmem_page_buffer(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) struct chain_allocator *ca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) struct highmem_pbe *pbe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) * We have allocated the "original" page frame and we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) * use it directly to store the loaded page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) last_highmem_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * The "original" page frame has not been allocated and we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) * use a "safe" page frame to store the loaded page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if (!pbe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) swsusp_free();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) pbe->orig_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (safe_highmem_pages > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) struct page *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) /* Copy of the page will be stored in high memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) kaddr = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) safe_highmem_pages--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) last_highmem_page = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) pbe->copy_page = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) /* Copy of the page will be stored in normal memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) kaddr = safe_pages_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) safe_pages_list = safe_pages_list->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) pbe->copy_page = virt_to_page(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) pbe->next = highmem_pblist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) highmem_pblist = pbe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) return kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) * copy_last_highmem_page - Copy the most recent highmem image page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) * Copy the contents of a highmem image page from @buffer, where the caller of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * snapshot_write_next() has stored them, to the right location represented by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) * @last_highmem_page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) static void copy_last_highmem_page(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) if (last_highmem_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) void *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) dst = kmap_atomic(last_highmem_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) copy_page(dst, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) last_highmem_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) }
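/*
 * Illustrative sketch (not compiled): the @buffer handoff described above
 * get_highmem_page_buffer(), seen across two consecutive calls to
 * snapshot_write_next().  fill_from_storage() is a hypothetical stand-in
 * for whatever reads image data into the buffer.
 */
#if 0
extern void fill_from_storage(void *buf);	/* hypothetical */

static void example_highmem_handoff(struct snapshot_handle *handle)
{
	/*
	 * Call N returned @buffer for a highmem page and latched the target
	 * in last_highmem_page; the caller now fills the buffer.
	 */
	fill_from_storage(data_of(*handle));

	/*
	 * Call N + 1 first runs copy_last_highmem_page(), which kmaps
	 * last_highmem_page, copy_page()s the data written above into it and
	 * clears last_highmem_page, and only then hands out the next buffer.
	 */
	snapshot_write_next(handle);
}
#endif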
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) static inline int last_highmem_page_copied(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) return !last_highmem_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) static inline void free_highmem_data(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) if (safe_highmem_bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) if (buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) free_image_page(buffer, PG_UNSAFE_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) static inline int prepare_highmem_image(struct memory_bitmap *bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) unsigned int *nr_highmem_p) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) static inline void *get_highmem_page_buffer(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) struct chain_allocator *ca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) static inline void copy_last_highmem_page(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) static inline int last_highmem_page_copied(void) { return 1; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) static inline void free_highmem_data(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) #endif /* CONFIG_HIGHMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
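/*
 * Back-of-the-envelope example (illustrative numbers only): with 4 KiB pages
 * on a 64-bit configuration, LINKED_PAGE_DATA_SIZE is PAGE_SIZE - sizeof(void *)
 * = 4088 bytes, and struct pbe (three pointers, see <linux/suspend.h>) is
 * 24 bytes, so PBES_PER_LINKED_PAGE works out to 4088 / 24 = 170 PBEs per
 * linked page.
 */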
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) * prepare_image - Make room for loading hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) * @new_bm: Uninitialized memory bitmap structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) * @bm: Memory bitmap with unsafe pages marked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) * Use @bm to mark the pages that will be overwritten in the process of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * restoring the system memory state from the suspend image ("unsafe" pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) * and allocate memory for the image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) * The idea is to allocate a new memory bitmap first and then allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) * as many pages as needed for image data, but without specifying what those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) * pages will be used for just yet. Instead, we mark them all as allocated and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) * create a list of "safe" pages to be used later. On systems with high
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) * memory a list of "safe" highmem pages is created too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) unsigned int nr_pages, nr_highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) struct linked_page *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) /* If there is no highmem, the buffer will not be necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) free_image_page(buffer, PG_UNSAFE_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) nr_highmem = count_highmem_image_pages(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) mark_unsafe_pages(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) goto Free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) duplicate_memory_bitmap(new_bm, bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) memory_bm_free(bm, PG_UNSAFE_KEEP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) if (nr_highmem > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) error = prepare_highmem_image(bm, &nr_highmem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) goto Free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) * Reserve some safe pages for potential later use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) * NOTE: This way we make sure there will be enough safe pages for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) * chain_alloc() in get_buffer(). It is a bit wasteful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) * nr_copy_pages cannot be greater than 50% of the memory anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) while (nr_pages > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) lp = get_image_page(GFP_ATOMIC, PG_SAFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) if (!lp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) goto Free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) lp->next = safe_pages_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) safe_pages_list = lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) nr_pages--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) /* Preallocate memory for the image */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) while (nr_pages > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (!lp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) goto Free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) if (!swsusp_page_is_free(virt_to_page(lp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) /* The page is "safe", add it to the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) lp->next = safe_pages_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) safe_pages_list = lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) /* Mark the page as allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) swsusp_set_page_forbidden(virt_to_page(lp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) swsusp_set_page_free(virt_to_page(lp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) nr_pages--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) Free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) swsusp_free();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
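/*
 * Worked example (illustrative numbers only): suppose nr_copy_pages = 100000,
 * nr_highmem = 20000 and allocated_unsafe_pages = 5000.  The first loop then
 * reserves DIV_ROUND_UP(75000, PBES_PER_LINKED_PAGE) safe pages for the
 * chain_alloc() calls in get_buffer() -- about 442 pages with ~170 PBEs per
 * linked page as estimated above -- and the second loop preallocates the
 * remaining 75000 pages for image data, threading the "safe" ones onto
 * safe_pages_list and merely marking the "unsafe" ones as allocated.
 */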
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) * get_buffer - Get the address to store the next image data page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) * Get the address that snapshot_write_next() should return to its caller to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) * write to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) struct pbe *pbe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) unsigned long pfn = memory_bm_next_pfn(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (pfn == BM_END_OF_MAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) return ERR_PTR(-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) page = pfn_to_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) if (PageHighMem(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) return get_highmem_page_buffer(page, ca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * We have allocated the "original" page frame and we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) * use it directly to store the loaded page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) return page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) * The "original" page frame has not been allocated and we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) * use a "safe" page frame to store the loaded page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) pbe = chain_alloc(ca, sizeof(struct pbe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) if (!pbe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) swsusp_free();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) pbe->orig_address = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) pbe->address = safe_pages_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) safe_pages_list = safe_pages_list->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) pbe->next = restore_pblist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) restore_pblist = pbe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) return pbe->address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) }
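/*
 * Illustrative sketch (not compiled): the PBE list built here is consumed
 * much later, after the whole image has been loaded, by architecture code
 * running from a safe page (typically swsusp_arch_resume(), often written in
 * assembly).  Conceptually it does something like this:
 */
#if 0
static void example_restore_unsafe_pages(void)
{
	struct pbe *p;

	/* Copy every page loaded into a "safe" frame back to its original frame. */
	for (p = restore_pblist; p; p = p->next)
		copy_page(p->orig_address, p->address);
}
#endif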
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) * snapshot_write_next - Get the address to store the next image page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) * @handle: Snapshot handle structure to guide the writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) * On the first call, @handle should point to a zeroed snapshot_handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) * structure. The structure gets populated then and a pointer to it should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) * passed to this function on every subsequent call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) * On success, the function returns a positive number. Then, the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) * is allowed to write up to the returned number of bytes to the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) * location computed by the data_of() macro.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) * The function returns 0 to indicate the "end of file" condition. Negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) * numbers are returned on errors, in which case the structure pointed to by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) * @handle is not updated and should not be used any more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) int snapshot_write_next(struct snapshot_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) static struct chain_allocator ca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) /* Check if we have already loaded the entire image */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) handle->sync_read = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (!handle->cur) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) /* This ensures the buffer will be freed by swsusp_free() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) buffer = get_image_page(GFP_ATOMIC, PG_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) handle->buffer = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) } else if (handle->cur == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) error = load_header(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) safe_pages_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) hibernate_restore_protection_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) } else if (handle->cur <= nr_meta_pages + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) error = unpack_orig_pfns(buffer, &copy_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) if (handle->cur == nr_meta_pages + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) error = prepare_image(&orig_bm, &copy_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) chain_init(&ca, GFP_ATOMIC, PG_SAFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) memory_bm_position_reset(&orig_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) restore_pblist = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) handle->buffer = get_buffer(&orig_bm, &ca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) handle->sync_read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) if (IS_ERR(handle->buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) return PTR_ERR(handle->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) copy_last_highmem_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) hibernate_restore_protect_page(handle->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) handle->buffer = get_buffer(&orig_bm, &ca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) if (IS_ERR(handle->buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) return PTR_ERR(handle->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) if (handle->buffer != buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) handle->sync_read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) handle->cur++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) return PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
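/*
 * Illustrative sketch (not compiled): the calling convention documented above,
 * as seen from an image-loading loop.  The in-tree caller is load_image() in
 * kernel/power/swap.c; read_one_page() is a hypothetical stand-in for the
 * actual I/O.
 */
#if 0
extern int read_one_page(void *buf);	/* hypothetical */

static int example_load_image(struct snapshot_handle *handle)
{
	int ret;

	for (;;) {
		ret = snapshot_write_next(handle);
		if (ret <= 0)
			break;		/* 0: end of image, < 0: error */
		ret = read_one_page(data_of(*handle));
		if (ret)
			break;
	}
	if (!ret) {
		snapshot_write_finalize(handle);
		if (!snapshot_image_loaded(handle))
			ret = -ENODATA;
	}
	return ret;
}
#endif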
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) * snapshot_write_finalize - Complete the loading of a hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) * Must be called after the last call to snapshot_write_next() in case the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) * page in the image happens to be a highmem page and its contents should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) * stored in highmem. Additionally, it recycles bitmap memory that's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) * necessary any more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) void snapshot_write_finalize(struct snapshot_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) copy_last_highmem_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) hibernate_restore_protect_page(handle->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) /* Do that only if we have loaded the image entirely */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) memory_bm_recycle(&orig_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) free_highmem_data();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) int snapshot_image_loaded(struct snapshot_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) return !(!nr_copy_pages || !last_highmem_page_copied() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) handle->cur <= nr_meta_pages + nr_copy_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) #ifdef CONFIG_HIGHMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) /* Assumes that @buf is ready and points to a "safe" page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) static inline void swap_two_pages_data(struct page *p1, struct page *p2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) void *kaddr1, *kaddr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) kaddr1 = kmap_atomic(p1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) kaddr2 = kmap_atomic(p2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) copy_page(buf, kaddr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) copy_page(kaddr1, kaddr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) copy_page(kaddr2, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) kunmap_atomic(kaddr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) kunmap_atomic(kaddr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) * restore_highmem - Put highmem image pages into their original locations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) * For each highmem page that was in use before hibernation and is included in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) * the image, and also has been allocated by the "restore" kernel, swap its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) * current contents with the previous (i.e. "before hibernation") ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) * If the restore eventually fails, we can call this function once again and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) * restore the highmem state as seen by the restore kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) int restore_highmem(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) struct highmem_pbe *pbe = highmem_pblist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) if (!pbe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) buf = get_image_page(GFP_ATOMIC, PG_SAFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) while (pbe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) pbe = pbe->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) free_image_page(buf, PG_UNSAFE_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
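/*
 * Illustrative sketch (not compiled): because each walk of highmem_pblist only
 * swaps the contents of the two pages involved, a second call after a failed
 * restore puts highmem back into the state the restore kernel saw, as noted in
 * the comment above.  some_later_step_failed() is purely hypothetical.
 */
#if 0
extern bool some_later_step_failed(void);	/* hypothetical */

static int example_restore_with_rollback(void)
{
	int ret;

	ret = restore_highmem();		/* image highmem contents now in place */
	if (ret)
		return ret;

	if (some_later_step_failed())
		ret = restore_highmem();	/* swap everything back */

	return ret;
}
#endif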
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) #endif /* CONFIG_HIGHMEM */