Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need those pages to be cleaned before
 * they can be executed. We don't know which pages these may be, so we
 * clean them all.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 *	The swap map is a data structure used for keeping track of each page
 *	written to a swap partition.  It consists of many swap_map_page
 *	structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 *	entries.  These structures are stored in swap and linked together
 *	with the help of the .next_swap member.
 *
 *	The swap map is created during suspend.  The swap map pages are
 *	allocated and populated one at a time, so we only need one memory
 *	page to set up the entire structure.
 *
 *	During resume we read all swap_map_page structures into a list.
 */

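/*
 * One swap_map_page fills exactly one page: the "- 1" below reserves the
 * last sector_t slot for the .next_swap link, leaving 511 entries with
 * 4 KiB pages and an 8-byte sector_t.
 */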
#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

/*
 * Number of free pages that are not in highmem.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image:
 * always half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/**
 *	The swap_map_handle structure is used for handling swap in
 *	a file-like way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};

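/*
 * The swsusp_header occupies exactly one page: reserved[] pads the
 * structure out to PAGE_SIZE, with the constant 20 accounting for the
 * two 10-byte signature arrays below.
 */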
struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
	              sizeof(u32)];
	u32	crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;

static struct swsusp_header *swsusp_header;

/**
 *	The following functions are used for tracking the allocated
 *	swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

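/*
 * Offsets adjacent to an existing extent are merged into it, so a run of
 * contiguous swap pages costs a single rb-tree node instead of one each.
 */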
static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

/**
 *	alloc_swapdev_block - allocate a swap page and register that it has
 *	been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	/* Sector 0 holds the swap signature, so 0 safely means "no block". */
	return 0;
}

/**
 *	free_all_swap_pages - free swap pages allocated for saving image data.
 *	It also frees the extents used to register which swap entries had been
 *	allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = rb_entry(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
	atomic_t		count;	/* in-flight bios */
	wait_queue_head_t	wait;	/* woken when count drops to zero */
	blk_status_t		error;	/* first error seen, if any */
	struct blk_plug		plug;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
	blk_start_plug(&hb->plug);
}

static void hib_finish_batch(struct hib_bio_batch *hb)
{
	blk_finish_plug(&hb->plug);
}

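/*
 * Completion handler for batched bios.  Writes drop the reference on the
 * bounce page taken in write_page(); reads may need the destination page
 * flushed from the icache before the restored image can run.
 */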
static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}

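/*
 * Submit a one-page read or write at @page_off (in PAGE_SIZE units) on
 * the resume device.  With a batch the bio completes asynchronously via
 * hib_end_io(); without one the call waits for completion.
 */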
static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
		struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
	bio_set_dev(bio, hib_resume_bdev);
	bio_set_op_attrs(bio, op, op_flags);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}

	return error;
}

static int hib_wait_io(struct hib_bio_batch *hb)
{
	/*
	 * We are relying on the behavior of blk_plug that a thread with
	 * a plug will flush the plug list before sleeping.
	 */
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}

/*
 * Saving part
 */

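/*
 * Replace the swap signature in the header page with HIBERNATE_SIG so
 * that the boot kernel can find the image; the original signature is
 * kept in orig_sig so it can be restored later.
 */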
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		pr_err("Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/**
 *	swsusp_swap_check - check if the resume device is a swap device
 *	and get its index (if so)
 *
 *	This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	if (swsusp_resume_device)
		res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
	else
		res = find_first_swap(&swsusp_resume_device);
	if (res < 0)
		return res;
	root_swap = res;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_WRITE,
			NULL);
	if (IS_ERR(hib_resume_bdev))
		return PTR_ERR(hib_resume_bdev);

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	return res;
}

/**
 *	write_page - Write one page to given swap location.
 *	@buf:		Address we're writing.
 *	@offset:	Offset of the swap page we're writing to.
 *	@hb:		bio completion batch
 */

static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (hb) {
		/* Use a bounce page so @buf can be reused while I/O is in flight. */
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
		                              __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
			                              __GFP_NOWARN |
			                              __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			pr_err("Cannot find swap device, try swapon -a\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}

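/*
 * Write one data page to a freshly allocated swap block and record its
 * sector in the current swap_map_page.  When the map page fills up it is
 * chained to the next one via .next_swap and written out itself.
 */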
static int swap_write_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		pr_info("S");
		error = mark_swapfiles(handle, flags);
		pr_cont("|\n");
		flush_swap_writer(handle);
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}

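/*
 * On-disk layout of one compressed chunk: a size_t length (LZO_HEADER)
 * followed by the compressed bytes, padded out to a page boundary.
 */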
/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
			             LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
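/*
 * Worked example, assuming 4 KiB pages and an 8-byte size_t: LZO_UNC_SIZE
 * is 128 KiB, lzo1x_worst_compress(x) expands to x + x/16 + 64 + 3, so
 * LZO_CMP_PAGES is DIV_ROUND_UP(139331 + 8, 4096) = 35 pages.
 */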

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192


/**
 *	save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}

/**
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	unsigned run_threads;                     /* nr current threads */
	wait_queue_head_t go;                     /* start crc update */
	wait_queue_head_t done;                   /* crc update done */
	u32 *crc32;                               /* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};

/**
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
			                     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
/**
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start compression */
	wait_queue_head_t done;                   /* compression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};
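/*
 * Each cmp_data carries its own unc, cmp and wrk buffers, close to
 * 290 KiB per instance with 4 KiB pages, which is why the array is
 * vmalloc'ed and the thread count is capped at LZO_THREADS.
 */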

/**
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
		                          d->cmp + LZO_HEADER, &d->cmp_len,
		                          d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * Limit the number of compression threads to keep the memory
	 * footprint down.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct cmp_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
		                            &data[thr],
		                            "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	pr_info("Using %u thread(s) for compression\n", nr_threads);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
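	/*
	 * Main pipeline: fill each thread's buffer with up to LZO_UNC_PAGES
	 * snapshot pages, kick the compressors and the CRC thread to run in
	 * parallel, then write the compressed results out page by page.
	 */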
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		for (thr = 0; thr < nr_threads; thr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 				ret = snapshot_read_next(snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 					goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 				if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 				memcpy(data[thr].unc + off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 				       data_of(*snapshot), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 				if (!(nr_pages % m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 					pr_info("Image saving progress: %3d%%\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 						nr_pages / m * 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 				nr_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			if (!off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			data[thr].unc_len = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			atomic_set(&data[thr].ready, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			wake_up(&data[thr].go);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		if (!thr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		crc->run_threads = thr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		atomic_set(&crc->ready, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		wake_up(&crc->go);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			wait_event(data[thr].done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			           atomic_read(&data[thr].stop));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			atomic_set(&data[thr].stop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			ret = data[thr].ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 				pr_err("LZO compression failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 				goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			if (unlikely(!data[thr].cmp_len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			             data[thr].cmp_len >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			             lzo1x_worst_compress(data[thr].unc_len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 				pr_err("Invalid LZO compressed length\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 				ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 				goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			*(size_t *)data[thr].cmp = data[thr].cmp_len;
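			/*
			 * Roughly, each block then lands on disk as (a
			 * sketch, per the LZO_HEADER definition earlier in
			 * this file):
			 *
			 *	[size_t cmp_len][cmp_len bytes of LZO1X data]
			 *	[padding up to the next page boundary]
			 *
			 * load_image_lzo() reads the same length field back
			 * to work out how many pages the block spans.
			 */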
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			 * Given we are writing one page at a time to disk, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			 * copy that much from the buffer, although the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	 * bit will likely be smaller than a full page. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			 * OK - we saved the length of the compressed data, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			 * any garbage at the end will be discarded when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			 * read it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 			for (off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 			     off < LZO_HEADER + data[thr].cmp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 			     off += PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 				memcpy(page, data[thr].cmp + off, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 				ret = swap_write_page(handle, page, &hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 					goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		wait_event(crc->done, atomic_read(&crc->stop));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		atomic_set(&crc->stop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) out_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	err2 = hib_wait_io(&hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	stop = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		ret = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		pr_info("Image saving done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) out_clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	hib_finish_batch(&hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		if (crc->thr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			kthread_stop(crc->thr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		kfree(crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		for (thr = 0; thr < nr_threads; thr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			if (data[thr].thr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				kthread_stop(data[thr].thr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		vfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		free_page((unsigned long)page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884)  *	enough_swap - Make sure we have enough swap to save the image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886)  *	Returns TRUE or FALSE after checking the total amount of swap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887)  *	space available from the resume partition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) static int enough_swap(unsigned int nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	unsigned int free_swap = count_swap_pages(root_swap, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	unsigned int required;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	pr_debug("Free swap pages: %u\n", free_swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	required = PAGES_FOR_IO + nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	return free_swap > required;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) }
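/*
 * A worked example, assuming 4 KiB pages and the PAGES_FOR_IO reserve from
 * <linux/suspend.h> (4 MiB worth of pages there): saving a 512 MiB image
 * means nr_pages = 131072, so the check above requires strictly more than
 * 131072 + 1024 free swap pages before the write may start.
 */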
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  *	swsusp_write - Write entire image and metadata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  *	@flags: flags to pass to the "boot" kernel in the image header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  *	It is important _NOT_ to unmount filesystems at this point. We want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  *	them synced (in case something goes wrong), but we do NOT want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  *	mark the filesystems clean: they are not. (It does not matter anyway;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  *	if we resume correctly, we'll mark the system clean.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) int swsusp_write(unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	struct swap_map_handle handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	struct snapshot_handle snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	struct swsusp_info *header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	unsigned long pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	pages = snapshot_get_image_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	error = get_swap_writer(&handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		pr_err("Cannot get swap writer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	if (flags & SF_NOCOMPRESS_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		if (!enough_swap(pages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 			pr_err("Not enough free swap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			error = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	error = snapshot_read_next(&snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (error < (int)PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		if (error >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			error = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	header = (struct swsusp_info *)data_of(snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	error = swap_write_page(&handle, header, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		error = (flags & SF_NOCOMPRESS_MODE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			save_image(&handle, &snapshot, pages - 1) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			save_image_lzo(&handle, &snapshot, pages - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) out_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	error = swap_writer_finish(&handle, flags, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  *	The following functions allow us to read data using a swap map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  *	in a file-like way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)  */
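/*
 * Roughly, each on-disk map page (struct swap_map_page, defined earlier in
 * this file) is a single page holding MAP_PAGE_ENTRIES sector numbers of
 * image data plus a next_swap link to the following map page:
 *
 *	+------------------+       +------------------+
 *	| entries[0..N-1]  |  +--->| entries[0..N-1]  |
 *	| next_swap -------+--+    | next_swap -> ... |
 *	+------------------+       +------------------+
 *
 * get_swap_reader() below walks that chain once and caches every map page
 * in memory, so swap_read_page() only ever reads data pages afterwards.
 */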
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) static void release_swap_reader(struct swap_map_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	struct swap_map_page_list *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	while (handle->maps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		if (handle->maps->map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			free_page((unsigned long)handle->maps->map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		tmp = handle->maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		handle->maps = handle->maps->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	handle->cur = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) static int get_swap_reader(struct swap_map_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		unsigned int *flags_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct swap_map_page_list *tmp, *last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	sector_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	*flags_p = swsusp_header->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (!swsusp_header->image) /* how can this happen? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	handle->cur = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	last = handle->maps = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	offset = swsusp_header->image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	while (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		if (!tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			release_swap_reader(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		if (!handle->maps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			handle->maps = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		if (last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			last->next = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		last = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		tmp->map = (struct swap_map_page *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 			   __get_free_page(GFP_NOIO | __GFP_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		if (!tmp->map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			release_swap_reader(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			release_swap_reader(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		offset = tmp->map->next_swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	handle->k = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	handle->cur = handle->maps->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static int swap_read_page(struct swap_map_handle *handle, void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		struct hib_bio_batch *hb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	sector_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	struct swap_map_page_list *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	if (!handle->cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	offset = handle->cur->entries[handle->k];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if (!offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	if (++handle->k >= MAP_PAGE_ENTRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		handle->k = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		free_page((unsigned long)handle->maps->map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		tmp = handle->maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		handle->maps = handle->maps->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		if (!handle->maps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			release_swap_reader(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			handle->cur = handle->maps->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static int swap_reader_finish(struct swap_map_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	release_swap_reader(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)  *	load_image - load the image using the swap map handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  *	@handle and the snapshot handle @snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  *	(assume there are @nr_to_read pages to load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static int load_image(struct swap_map_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)                       struct snapshot_handle *snapshot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)                       unsigned int nr_to_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	unsigned int m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	ktime_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	ktime_t stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	struct hib_bio_batch hb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	int err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	unsigned nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	hib_init_batch(&hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	clean_pages_on_read = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	m = nr_to_read / 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (!m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		m = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	nr_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	start = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		ret = snapshot_write_next(snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		ret = swap_read_page(handle, data_of(*snapshot), &hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		if (snapshot->sync_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			ret = hib_wait_io(&hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		if (!(nr_pages % m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			pr_info("Image loading progress: %3d%%\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 				nr_pages / m * 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		nr_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	err2 = hib_wait_io(&hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	hib_finish_batch(&hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	stop = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		ret = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		pr_info("Image loading done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		snapshot_write_finalize(snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		if (!snapshot_image_loaded(snapshot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	swsusp_show_speed(start, stop, nr_to_read, "Read");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  * Structure used for LZO data decompression.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) struct dec_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	struct task_struct *thr;                  /* thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	atomic_t ready;                           /* ready to start flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	atomic_t stop;                            /* ready to stop flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	int ret;                                  /* return code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	wait_queue_head_t go;                     /* start decompression */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	wait_queue_head_t done;                   /* decompression done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	size_t unc_len;                           /* uncompressed length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	size_t cmp_len;                           /* compressed length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) };
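/*
 * The buffer sizes mirror the compression side: LZO_UNC_SIZE is
 * LZO_UNC_PAGES * PAGE_SIZE, and LZO_CMP_SIZE leaves room for the
 * worst-case lzo1x_worst_compress() expansion plus the LZO_HEADER length
 * field (per the definitions earlier in this file), so one dec_data can
 * always hold a full block in both its compressed and uncompressed forms.
 */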
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)  * Decompression function that runs in its own thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static int lzo_decompress_threadfn(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	struct dec_data *d = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		wait_event(d->go, atomic_read(&d->ready) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		                  kthread_should_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		if (kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			d->thr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			d->ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			atomic_set(&d->stop, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			wake_up(&d->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		atomic_set(&d->ready, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		d->unc_len = LZO_UNC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		                               d->unc, &d->unc_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		if (clean_pages_on_decompress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			flush_icache_range((unsigned long)d->unc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 					   (unsigned long)d->unc + d->unc_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		atomic_set(&d->stop, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		wake_up(&d->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)  * load_image_lzo - Load compressed image data and decompress them with LZO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)  * @handle: Swap map handle to use for loading data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  * @snapshot: Image to copy uncompressed data into.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  * @nr_to_read: Number of pages to load.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static int load_image_lzo(struct swap_map_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)                           struct snapshot_handle *snapshot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)                           unsigned int nr_to_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	unsigned int m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	int eof = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	struct hib_bio_batch hb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	ktime_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	ktime_t stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	unsigned nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	size_t off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	unsigned i, thr, run_threads, nr_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	unsigned ring = 0, pg = 0, ring_size = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	         have = 0, want, need, asked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	unsigned long read_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	unsigned char **page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	struct dec_data *data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	struct crc_data *crc = NULL;
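	/*
	 * A sketch of how the read-ahead ring below is driven, going by the
	 * code that follows: page[] is a ring of ring_size pages; "ring" is
	 * the producer index (next slot to read into) and "pg" the consumer
	 * index (next slot to copy out), while "have" counts completed
	 * reads, "asked" counts reads still in flight, "want" counts free
	 * slots to refill, and "need" is the number of pages the current
	 * compressed block occupies.
	 */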
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	hib_init_batch(&hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	 * We'll limit the number of threads for decompression to keep the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	 * memory footprint down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	nr_threads = num_online_cpus() - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		pr_err("Failed to allocate LZO page\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		goto out_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	data = vmalloc(array_size(nr_threads, sizeof(*data)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		pr_err("Failed to allocate LZO data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		goto out_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	for (thr = 0; thr < nr_threads; thr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		memset(&data[thr], 0, offsetof(struct dec_data, go));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	if (!crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		pr_err("Failed to allocate crc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		goto out_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	memset(crc, 0, offsetof(struct crc_data, go));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	clean_pages_on_decompress = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	 * Start the decompression threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	for (thr = 0; thr < nr_threads; thr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		init_waitqueue_head(&data[thr].go);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		init_waitqueue_head(&data[thr].done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		data[thr].thr = kthread_run(lzo_decompress_threadfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		                            &data[thr],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		                            "image_decompress/%u", thr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		if (IS_ERR(data[thr].thr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			data[thr].thr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			pr_err("Cannot start decompression threads\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			goto out_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 * Start the CRC32 thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	init_waitqueue_head(&crc->go);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	init_waitqueue_head(&crc->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	handle->crc32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	crc->crc32 = &handle->crc32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	for (thr = 0; thr < nr_threads; thr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		crc->unc[thr] = data[thr].unc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		crc->unc_len[thr] = &data[thr].unc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	if (IS_ERR(crc->thr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		crc->thr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		pr_err("Cannot start CRC32 thread\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		goto out_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	 * Set the number of pages for read buffering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	 * This is complete guesswork, because we'll only know the real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	 * picture once prepare_image() is called, which is much later on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	 * during the image load phase. We'll assume the worst case and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	 * say that none of the image pages are from high memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	if (low_free_pages() > snapshot_get_image_size())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
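	/*
	 * A hedged illustration of the heuristic above: with 300000 free
	 * low pages and a 200000-page image, read_pages starts out as
	 * (300000 - 200000) / 2 = 50000, and the clamp then pulls it down
	 * to LZO_MAX_RD_PAGES.
	 */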
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	for (i = 0; i < read_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 						  GFP_NOIO | __GFP_HIGH :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 						  GFP_NOIO | __GFP_NOWARN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 						  __GFP_NORETRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		if (!page[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			if (i < LZO_CMP_PAGES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 				ring_size = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 				pr_err("Failed to allocate LZO pages\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 				goto out_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	want = ring_size = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	pr_info("Using %u thread(s) for decompression\n", nr_threads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	pr_info("Loading and decompressing image data (%u pages)...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		nr_to_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	m = nr_to_read / 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	if (!m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		m = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	nr_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	start = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	ret = snapshot_write_next(snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		for (i = 0; !eof && i < want; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			ret = swap_read_page(handle, page[ring], &hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 				 * On real read error, finish. On end of data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 				 * set EOF flag and just exit the read loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 				if (handle->cur &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 				    handle->cur->entries[handle->k]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 					goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 					eof = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			if (++ring >= ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 				ring = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		asked += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		want -= i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		 * We are out of data; wait for some more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		if (!have) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			if (!asked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			ret = hib_wait_io(&hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			have += asked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			asked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			if (eof)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 				eof = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		}
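		/*
		 * eof is effectively a two-stage marker: 1 means
		 * swap_read_page() ran out of image data, and 2 (set above
		 * once hib_wait_io() has drained the queue) means no further
		 * pages can still arrive. Only at stage 2 is a block that
		 * needs more pages than we have treated as a hard error.
		 */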
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		if (crc->run_threads) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			wait_event(crc->done, atomic_read(&crc->stop));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			atomic_set(&crc->stop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			crc->run_threads = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		for (thr = 0; have && thr < nr_threads; thr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			data[thr].cmp_len = *(size_t *)page[pg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			if (unlikely(!data[thr].cmp_len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			             data[thr].cmp_len >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 				pr_err("Invalid LZO compressed length\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 				ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 				goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			                    PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			if (need > have) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 				if (eof > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 					ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 					goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			for (off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			     off < LZO_HEADER + data[thr].cmp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			     off += PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 				memcpy(data[thr].cmp + off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 				       page[pg], PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 				have--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 				want++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 				if (++pg >= ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 					pg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			atomic_set(&data[thr].ready, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			wake_up(&data[thr].go);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		 * Wait for more data while we are decompressing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		if (have < LZO_CMP_PAGES && asked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			ret = hib_wait_io(&hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 				goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 			have += asked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 			asked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			if (eof)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 				eof = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			wait_event(data[thr].done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 			           atomic_read(&data[thr].stop));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 			atomic_set(&data[thr].stop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 			ret = data[thr].ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 				pr_err("LZO decompression failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 				goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 			if (unlikely(!data[thr].unc_len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			             data[thr].unc_len > LZO_UNC_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			             data[thr].unc_len & (PAGE_SIZE - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 				pr_err("Invalid LZO uncompressed length\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 				ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 				goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 			for (off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			     off < data[thr].unc_len; off += PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 				memcpy(data_of(*snapshot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 				       data[thr].unc + off, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 				if (!(nr_pages % m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 					pr_info("Image loading progress: %3d%%\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 						nr_pages / m * 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 				nr_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 				ret = snapshot_write_next(snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 				if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 					crc->run_threads = thr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 					atomic_set(&crc->ready, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 					wake_up(&crc->go);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 					goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		crc->run_threads = thr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		atomic_set(&crc->ready, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		wake_up(&crc->go);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) out_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (crc->run_threads) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		wait_event(crc->done, atomic_read(&crc->stop));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		atomic_set(&crc->stop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	stop = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		pr_info("Image loading done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		snapshot_write_finalize(snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		if (!snapshot_image_loaded(snapshot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			if (swsusp_header->flags & SF_CRC32_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 				if (handle->crc32 != swsusp_header->crc32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 					pr_err("Invalid image CRC32!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 					ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	swsusp_show_speed(start, stop, nr_to_read, "Read");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) out_clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	hib_finish_batch(&hb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	for (i = 0; i < ring_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		free_page((unsigned long)page[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	if (crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		if (crc->thr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 			kthread_stop(crc->thr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		kfree(crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		for (thr = 0; thr < nr_threads; thr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 			if (data[thr].thr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 				kthread_stop(data[thr].thr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		vfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	vfree(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)  *	swsusp_read - read the hibernation image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)  *	@flags_p: flags passed by the "frozen" kernel in the image header are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)  *		  written into this memory location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) int swsusp_read(unsigned int *flags_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	struct swap_map_handle handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	struct snapshot_handle snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	struct swsusp_info *header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	error = snapshot_write_next(&snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (error < (int)PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		return error < 0 ? error : -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	header = (struct swsusp_info *)data_of(snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	error = get_swap_reader(&handle, flags_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	error = swap_read_page(&handle, header, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 			load_image(&handle, &snapshot, header->pages - 1) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			load_image_lzo(&handle, &snapshot, header->pages - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	swap_reader_finish(&handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		pr_debug("Image successfully loaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		pr_debug("Error %d resuming\n", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)  *      swsusp_check - Check for swsusp signature in the resume device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) int swsusp_check(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	void *holder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 					    FMODE_READ | FMODE_EXCL, &holder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	if (!IS_ERR(hib_resume_bdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		set_blocksize(hib_resume_bdev, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		clear_page(swsusp_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		error = hib_submit_io(REQ_OP_READ, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 					swsusp_resume_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 					swsusp_header, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			goto put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			/* Reset swap signature now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 						swsusp_resume_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 						swsusp_header, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 			error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		}
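		/*
		 * This makes resume one-shot: the image kernel wrote
		 * HIBERNATE_SIG over the swap signature, and restoring
		 * orig_sig here means a later boot will see an ordinary swap
		 * partition instead of trying to resume from a stale image.
		 */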
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 			blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 			pr_debug("Image signature found, resuming\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		error = PTR_ERR(hib_resume_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		pr_debug("Image not found (code %d)\n", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  *	swsusp_close - close swap device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) void swsusp_close(fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (IS_ERR(hib_resume_bdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		pr_debug("Image device not initialised\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	blkdev_put(hib_resume_bdev, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  *      swsusp_unmark - Unmark swsusp signature in the resume device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) #ifdef CONFIG_SUSPEND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) int swsusp_unmark(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		      swsusp_header, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 					swsusp_resume_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 					swsusp_header, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		pr_err("Cannot find swsusp signature!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		error = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	 * We just returned from suspend, we don't need the image any more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	free_all_swap_pages(root_swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) #endif
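/*
 * (Going by the comment inside swsusp_unmark() above: this path runs when
 * the system comes back from suspend while a hibernation image is still
 * sitting in swap, so the stale image's signature and swap pages are
 * released rather than resumed from later.)
 */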
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) static int __init swsusp_header_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	if (!swsusp_header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		panic("Could not allocate memory for swsusp_header\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) core_initcall(swsusp_header_init);