Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

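	/*
	 * Buffer heads on a page form a circular list, so the walk starts
	 * at @head and the "bh != head || !start" test lets the first
	 * iteration through.  Only the buffers that overlap the byte range
	 * [@from, @from + @len) are marked uptodate and added to the
	 * current transaction.
	 */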
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct iomap_writepage_ctx wpc = { };

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
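	/*
	 * A transaction is already active on this task; starting writeback
	 * here could recurse into the log, so leave the page dirty and let
	 * a later writeback pass pick it up.
	 */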
	if (current->journal_info)
		goto redirty;
	return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

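	/*
	 * PageChecked is set by jdata_set_page_dirty() when a page is
	 * dirtied inside a transaction; such a page still needs its
	 * buffers attached to the journal before it can be written.
	 */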
	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

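	/*
	 * Reserve journal space for every block of every page in the
	 * pagevec up front; the whole batch is written under a single
	 * transaction.
	 */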
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

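	/*
	 * For data-integrity syncs, tag all the dirty pages first and then
	 * write only the tagged pages, so that pages dirtied while we are
	 * writing cannot keep this loop busy forever.
	 */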
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

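	/*
	 * A stuffed file keeps its data in the inode's on-disk block,
	 * immediately after the dinode header, so the page is filled with
	 * a single copy out of that block.
	 */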
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}


static int __gfs2_readpage(void *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

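	/*
	 * Non-jdata files (and jdata pages without buffer heads when the
	 * block size matches the page size) go through iomap; stuffed
	 * pages are filled straight from the dinode; the remaining jdata
	 * cases fall back to mpage with gfs2_block_map().
	 */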
	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	return __gfs2_readpage(file, page);
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

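	/*
	 * Copy out one page per iteration.  Only the first page may start
	 * at a non-zero offset; every later page is read from offset 0.
	 * Callers (e.g. the rindex scan) read fixed-size records this way.
	 */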
	do {
		amt = size - copied;
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */


static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out2;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out2:
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
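	/*
	 * Pages dirtied from inside a transaction are flagged Checked so
	 * that __gfs2_jdata_writepage() later knows to add their buffers
	 * to the journal before writing them back.
	 */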
	if (current->journal_info)
		SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

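	/*
	 * Stuffed files have no separately addressable data blocks, so
	 * report a hole (0) rather than mapping anything.
	 */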
	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

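/*
 * gfs2_discard - Unlink a buffer from the journal before it is invalidated
 *
 * Under the log lock, the buffer is cleaned and its bufdata is either
 * unlinked from its transaction list or, if that is not safe, removed
 * from the journal via the AIL.
 */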
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

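	/*
	 * Only a full-page invalidation may clear the Checked flag and
	 * release the page's buffers; for a partial invalidation, just
	 * discard the buffers that lie entirely inside the range.
	 */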
	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared.  Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);

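	/*
	 * All buffers on the page are releasable: detach their bufdata
	 * structures and free them, unless still queued as revokes, before
	 * handing the page to try_to_free_buffers().
	 */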
	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

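/*
 * Regular (writeback and ordered mode) files use the iomap-based
 * operations below; jdata files use the buffer-head based variants
 * further down so that their data can be journalled.
 */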
static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = iomap_set_page_dirty,
	.releasepage = iomap_releasepage,
	.invalidatepage = iomap_invalidatepage,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migratepage = iomap_migrate_page,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}