Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

mm/page_io.c (every line last touched by commit 8f3ce5b39, kx, 2023-10-28)
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <trace/hooks/mm.h>

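/*
 * Allocate a one-segment bio for swap I/O on @page. map_swap_page()
 * returns the page's offset within its swap extent (in page-size units)
 * and the backing block device; shifting by (PAGE_SHIFT - 9) converts
 * that offset into 512-byte sectors. Returns NULL if allocation fails.
 */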
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		struct block_device *bdev;

		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		bio_add_page(bio, page, thp_size(page), 0);
	}
	return bio;
}

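/*
 * Completion handler for swap write bios: end writeback on the page and
 * drop the bio. On I/O error the page is redirtied so its contents are
 * not lost to reclaim (see the comment in the error path below).
 */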
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

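/*
 * Completion handler for swap read bios: mark the page up to date on
 * success and unlock it. If a task is polling for this bio from
 * swap_readpage() (parked in bio->bi_private), wake it and drop the
 * task reference taken at submission time.
 */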
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

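/*
 * Default swap_activate for swap files: walk the file with bmap() and
 * add one extent per run of blocks that is physically contiguous and
 * PAGE_SIZE-aligned on disk. Returns the number of extents added, or
 * -EINVAL if the file has holes.
 */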
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(page);
	if (ret) {
		set_page_dirty(page);
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

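/* Account a swap-out: one PSWPOUT per base page, plus THP_SWPOUT for THPs. */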
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
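/*
 * Associate the bio with the block cgroup that corresponds to the
 * memory cgroup owning @page, so swap writeback is throttled against
 * the right cgroup. Compiles away when memcg or blk-cgroup is off.
 */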
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;

	if (!page->mem_cgroup)
		return;

	rcu_read_lock();
	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

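/*
 * Write @page to swap via one of three paths: a direct_IO kiocb for
 * file-backed swap (SWP_FS_OPS), the bdev_write_page() rw_page fast
 * path, or a regular bio submitted with submit_bio().
 */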
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);
	bool skip = false;

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len  = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			trace_android_vh_count_pswpout(sis);
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty, avoid
			 * rotate_reclaimable_page() and rate-limit the
			 * messages, but do not flag PageError like the
			 * normal direct-to-bio case, as the failure
			 * could be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		trace_android_vh_count_swpout_vm_event(sis, page, &skip);
		if (!skip)
			count_swpout_vm_event(page);
		return 0;
	}

	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
	bio_associate_blkg_from_page(bio, page);
	trace_android_vh_count_swpout_vm_event(sis, page, &skip);
	if (!skip)
		count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}

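/*
 * Read @page back in from swap: try frontswap first, then the
 * filesystem's readpage for file-backed swap (SWP_FS_OPS), then the
 * bdev_read_page() fast path for SWP_SYNCHRONOUS_IO devices, and
 * finally a regular bio. When @synchronous, poll or sleep until the
 * read completes instead of returning with the I/O in flight.
 */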
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	blk_qc_t qc;
	struct gendisk *disk;
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	psi_memstall_enter(&pflags);

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret) {
			trace_android_vh_count_pswpin(sis);
			count_vm_event(PSWPIN);
		}
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			trace_android_vh_count_pswpin(sis);
			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	disk = bio->bi_disk;
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	if (synchronous) {
		bio->bi_opf |= REQ_HIPRI;
		get_task_struct(current);
		bio->bi_private = current;
	}
	trace_android_vh_count_pswpin(sis);
	count_vm_event(PSWPIN);
	bio_get(bio);
	qc = submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!blk_poll(disk->queue, qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	psi_memstall_leave(&pflags);
	return ret;
}

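/*
 * Dirty a swap-cache page: file-backed swap delegates to the
 * filesystem's set_page_dirty so writeback accounting stays correct;
 * block-device swap only needs the dirty flag set.
 */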
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}