Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

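The file below implements the page I/O half of the 5.10 fscache netfs API: checking and waiting on pages being written to the cache, deciding whether a page may be released during reclaim, propagating attribute changes, and the read/allocate retrieval paths used by network filesystems. For orientation, here is a minimal sketch of how a netfs readpage path might consult the cache through the fscache_read_or_alloc_page() wrapper from <linux/fscache.h> (which calls the exported __fscache_read_or_alloc_page() below); the my_netfs_* names and the callback body are illustrative assumptions, not code from this tree:

#include <linux/fscache.h>
#include <linux/pagemap.h>

/* Illustrative completion handler: called for each page the cache reads. */
static void my_netfs_read_complete(struct page *page, void *context, int error)
{
	if (!error)
		SetPageUptodate(page);
	/* A real netfs would fall back to reading from the server on error
	 * rather than just unlocking the page. */
	unlock_page(page);
}

/* Illustrative readpage helper: try the cache before going to the server. */
static int my_netfs_readpage_from_cache(struct fscache_cookie *cookie,
					struct page *page)
{
	int ret;

	ret = fscache_read_or_alloc_page(cookie, page,
					 my_netfs_read_complete,
					 NULL /* netfs read context */,
					 GFP_KERNEL);
	if (ret == 0)		/* read dispatched; callback completes the page */
		return 0;

	/* -ENODATA: block reserved but empty; -ENOBUFS: no usable cache;
	 * -ENOMEM/-ERESTARTSYS also possible. Read from the server instead. */
	return ret;
}

The return codes follow the conventions documented above __fscache_read_or_alloc_page() in the file itself.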
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /* Cache page management and data I/O routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Written by David Howells (dhowells@redhat.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #define FSCACHE_DEBUG_LEVEL PAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/fscache-cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/buffer_head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/pagevec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include "internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * check to see if a page is being written to the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 	void *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) 	val = radix_tree_lookup(&cookie->stores, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 	trace_fscache_check_page(cookie, page, val, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 	return val != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) EXPORT_SYMBOL(__fscache_check_page_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * wait for a page to finish being written to the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 	trace_fscache_page(cookie, page, fscache_page_write_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 	wait_event(*wq, !__fscache_check_page_write(cookie, page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) EXPORT_SYMBOL(__fscache_wait_on_page_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  * wait for a page to finish being written to the cache. Put a timeout here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  * since we might be called recursively via parent fs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 				  HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59)  * decide whether a page can be released, possibly by cancelling a store to it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60)  * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 				  struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 				  gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	struct page *xpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	void *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	_enter("%p,%p,%x", cookie, page, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	trace_fscache_page(cookie, page, fscache_page_maybe_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) try_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	val = radix_tree_lookup(&cookie->stores, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	if (!val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 		fscache_stat(&fscache_n_store_vmscan_not_storing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 		__fscache_uncache_page(cookie, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	/* see if the page is actually undergoing storage - if so we can't get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	 * rid of it till the cache has finished with it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	if (radix_tree_tag_get(&cookie->stores, page->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 			       FSCACHE_COOKIE_STORING_TAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		goto page_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	/* the page is pending storage, so we attempt to cancel the store and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	 * discard the store request so that the page can be reclaimed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	spin_lock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	if (radix_tree_tag_get(&cookie->stores, page->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 			       FSCACHE_COOKIE_STORING_TAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 		/* the page started to undergo storage whilst we were looking,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 		 * so now we can only wait or return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 		spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 		goto page_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	xpage = radix_tree_delete(&cookie->stores, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	trace_fscache_page(cookie, page, fscache_page_radix_delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	if (xpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		fscache_stat(&fscache_n_store_vmscan_cancelled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 		fscache_stat(&fscache_n_store_radix_deletes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 		ASSERTCMP(xpage, ==, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 		fscache_stat(&fscache_n_store_vmscan_gone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	wake_up_bit(&cookie->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	trace_fscache_wake_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	if (xpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 		put_page(xpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	__fscache_uncache_page(cookie, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) page_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 	/* We will wait here if we're allowed to, but that could deadlock the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	 * allocator as the work threads writing to the cache may all end up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	 * sleeping on memory allocation, so we may need to impose a timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 	 * too. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 		fscache_stat(&fscache_n_store_vmscan_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	fscache_stat(&fscache_n_store_vmscan_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	if (!release_page_wait_timeout(cookie, page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 		_debug("fscache writeout timeout page: %p{%lx}",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 			page, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	gfp &= ~__GFP_DIRECT_RECLAIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	goto try_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) EXPORT_SYMBOL(__fscache_maybe_release_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144)  * note that a page has finished being written to the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) static void fscache_end_page_write(struct fscache_object *object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 				   struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	struct fscache_cookie *cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	struct page *xpage = NULL, *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	spin_lock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	cookie = object->cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	if (cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 		/* delete the page from the tree if it is now no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 		 * pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 		spin_lock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 		radix_tree_tag_clear(&cookie->stores, page->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 				     FSCACHE_COOKIE_STORING_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 		trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 		if (!radix_tree_tag_get(&cookie->stores, page->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 					FSCACHE_COOKIE_PENDING_TAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 			fscache_stat(&fscache_n_store_radix_deletes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 			xpage = radix_tree_delete(&cookie->stores, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 			trace_fscache_page(cookie, page, fscache_page_radix_delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 			trace_fscache_page(cookie, page, fscache_page_write_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 			val = radix_tree_lookup(&cookie->stores, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 			trace_fscache_check_page(cookie, page, val, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 			trace_fscache_page(cookie, page, fscache_page_write_end_pend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 		spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 		wake_up_bit(&cookie->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 		trace_fscache_wake_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 		trace_fscache_page(cookie, page, fscache_page_write_end_noc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	spin_unlock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	if (xpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 		put_page(xpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185)  * actually apply the changed attributes to a cache object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) static void fscache_attr_changed_op(struct fscache_operation *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	struct fscache_object *object = op->object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	fscache_stat(&fscache_n_attr_changed_calls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	if (fscache_object_is_active(object)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 		fscache_stat(&fscache_n_cop_attr_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 		ret = object->cache->ops->attr_changed(object);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 		fscache_stat_d(&fscache_n_cop_attr_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 			fscache_abort_object(object);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 		fscache_op_complete(op, ret < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 		fscache_op_complete(op, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	_leave("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211)  * notification that the attributes on an object have changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) int __fscache_attr_changed(struct fscache_cookie *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	struct fscache_operation *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	struct fscache_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	bool wake_cookie = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	_enter("%p", cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	fscache_stat(&fscache_n_attr_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	op = kzalloc(sizeof(*op), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	if (!op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 		fscache_stat(&fscache_n_attr_changed_nomem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 		_leave(" = -ENOMEM");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	fscache_operation_init(cookie, op, fscache_attr_changed_op, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_attr_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	op->flags = FSCACHE_OP_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 		(1 << FSCACHE_OP_EXCLUSIVE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 		(1 << FSCACHE_OP_UNUSE_COOKIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	spin_lock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	if (!fscache_cookie_enabled(cookie) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	    hlist_empty(&cookie->backing_objects))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 		goto nobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	object = hlist_entry(cookie->backing_objects.first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 			     struct fscache_object, cookie_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	__fscache_use_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	if (fscache_submit_exclusive_op(object, op) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 		goto nobufs_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	fscache_stat(&fscache_n_attr_changed_ok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	fscache_put_operation(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	_leave(" = 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) nobufs_dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	wake_cookie = __fscache_unuse_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) nobufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	fscache_put_operation(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	if (wake_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 		__fscache_wake_unused_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	fscache_stat(&fscache_n_attr_changed_nobufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	_leave(" = %d", -ENOBUFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) EXPORT_SYMBOL(__fscache_attr_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269)  * Handle cancellation of a pending retrieval op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	struct fscache_retrieval *op =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 		container_of(_op, struct fscache_retrieval, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	atomic_set(&op->n_pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280)  * release a retrieval op reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) static void fscache_release_retrieval_op(struct fscache_operation *_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	struct fscache_retrieval *op =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 		container_of(_op, struct fscache_retrieval, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	_enter("{OP%x}", op->op.debug_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 		    atomic_read(&op->n_pages), ==, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	fscache_hist(fscache_retrieval_histogram, op->start_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	if (op->context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 		fscache_put_context(op->cookie, op->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	_leave("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300)  * allocate a retrieval op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) static struct fscache_retrieval *fscache_alloc_retrieval(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	struct fscache_cookie *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	fscache_rw_complete_t end_io_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	struct fscache_retrieval *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	/* allocate a retrieval operation and attempt to submit it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	op = kzalloc(sizeof(*op), GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	if (!op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 		fscache_stat(&fscache_n_retrievals_nomem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	fscache_operation_init(cookie, &op->op, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 			       fscache_do_cancel_retrieval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 			       fscache_release_retrieval_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	op->op.flags	= FSCACHE_OP_MYTHREAD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 		(1UL << FSCACHE_OP_WAITING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 		(1UL << FSCACHE_OP_UNUSE_COOKIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	op->cookie	= cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	op->mapping	= mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	op->end_io_func	= end_io_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	op->context	= context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	op->start_time	= jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	INIT_LIST_HEAD(&op->to_do);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	/* Pin the netfs read context in case we need to do the actual netfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	 * read because we've encountered a cache read failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 	if (context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 		fscache_get_context(op->cookie, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	return op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339)  * wait for a deferred lookup to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	unsigned long jif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	_enter("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 		_leave(" = 0 [imm]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	fscache_stat(&fscache_n_retrievals_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	jif = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 			TASK_INTERRUPTIBLE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 		fscache_stat(&fscache_n_retrievals_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 		_leave(" = -ERESTARTSYS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	fscache_hist(fscache_retrieval_delay_histogram, jif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	_leave(" = 0 [dly]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371)  * wait for an object to become active (or dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) int fscache_wait_for_operation_activation(struct fscache_object *object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 					  struct fscache_operation *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 					  atomic_t *stat_op_waits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 					  atomic_t *stat_object_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 		goto check_if_dead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	_debug(">>> WT");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	if (stat_op_waits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 		fscache_stat(stat_op_waits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 			TASK_INTERRUPTIBLE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 		trace_fscache_op(object->cookie, op, fscache_op_signal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 		ret = fscache_cancel_op(op, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 			return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 		/* it's been removed from the pending queue by another party,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 		 * so we should get to run shortly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 			    TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	_debug("<<< GO");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) check_if_dead:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	if (op->state == FSCACHE_OP_ST_CANCELLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		if (stat_object_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 			fscache_stat(stat_object_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 		_leave(" = -ENOBUFS [cancelled]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	if (unlikely(fscache_object_is_dying(object) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 		     fscache_cache_is_broken(object))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 		enum fscache_operation_state state = op->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 		trace_fscache_op(object->cookie, op, fscache_op_signal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 		fscache_cancel_op(op, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		if (stat_object_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 			fscache_stat(stat_object_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 		_leave(" = -ENOBUFS [obj dead %d]", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421)  * read a page from the cache or allocate a block in which to store it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422)  * - we return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423)  *   -ENOMEM	- out of memory, nothing done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424)  *   -ERESTARTSYS - interrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425)  *   -ENOBUFS	- no backing object available in which to cache the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426)  *   -ENODATA	- no data available in the backing object for this block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427)  *   0		- dispatched a read - it'll call end_io_func() when finished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 				 struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 				 fscache_rw_complete_t end_io_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 				 void *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 				 gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	struct fscache_retrieval *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	struct fscache_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	bool wake_cookie = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	_enter("%p,%p,,,", cookie, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	fscache_stat(&fscache_n_retrievals);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	if (hlist_empty(&cookie->backing_objects))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		goto nobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		_leave(" = -ENOBUFS [invalidating]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	ASSERTCMP(page, !=, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	if (fscache_wait_for_deferred_lookup(cookie) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	op = fscache_alloc_retrieval(cookie, page->mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 				     end_io_func, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	if (!op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		_leave(" = -ENOMEM");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	atomic_set(&op->n_pages, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	spin_lock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	if (!fscache_cookie_enabled(cookie) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	    hlist_empty(&cookie->backing_objects))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		goto nobufs_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	object = hlist_entry(cookie->backing_objects.first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 			     struct fscache_object, cookie_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	__fscache_use_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	atomic_inc(&object->n_reads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	if (fscache_submit_op(object, &op->op) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		goto nobufs_unlock_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	fscache_stat(&fscache_n_retrieval_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	/* we wait for the operation to become active, and then process it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	 * *here*, in this thread, and not in the thread pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	ret = fscache_wait_for_operation_activation(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		object, &op->op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		__fscache_stat(&fscache_n_retrieval_op_waits),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		__fscache_stat(&fscache_n_retrievals_object_dead));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	/* ask the cache to honour the operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		fscache_stat(&fscache_n_cop_allocate_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		ret = object->cache->ops->allocate_page(op, page, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		fscache_stat_d(&fscache_n_cop_allocate_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 			ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		fscache_stat(&fscache_n_cop_read_or_alloc_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	if (ret == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		fscache_stat(&fscache_n_retrievals_nomem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	else if (ret == -ERESTARTSYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 		fscache_stat(&fscache_n_retrievals_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	else if (ret == -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		fscache_stat(&fscache_n_retrievals_nodata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		fscache_stat(&fscache_n_retrievals_nobufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		fscache_stat(&fscache_n_retrievals_ok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	fscache_put_retrieval(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	_leave(" = %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) nobufs_unlock_dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	atomic_dec(&object->n_reads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	wake_cookie = __fscache_unuse_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) nobufs_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	if (wake_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		__fscache_wake_unused_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	fscache_put_retrieval(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) nobufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	fscache_stat(&fscache_n_retrievals_nobufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	_leave(" = -ENOBUFS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) EXPORT_SYMBOL(__fscache_read_or_alloc_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541)  * read a list of page from the cache or allocate a block in which to store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542)  * them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543)  * - we return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544)  *   -ENOMEM	- out of memory, some pages may be being read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545)  *   -ERESTARTSYS - interrupted, some pages may be being read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546)  *   -ENOBUFS	- no backing object or space available in which to cache any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547)  *                pages not being read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548)  *   -ENODATA	- no data available in the backing object for some or all of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549)  *                the pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550)  *   0		- dispatched a read on all pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552)  * end_io_func() will be called for each page read from the cache as it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553)  * finishes being read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555)  * any pages for which a read is dispatched will be removed from pages and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556)  * nr_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 				  struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 				  struct list_head *pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 				  unsigned *nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 				  fscache_rw_complete_t end_io_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 				  void *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 				  gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	struct fscache_retrieval *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	struct fscache_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	bool wake_cookie = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	_enter("%p,,%d,,,", cookie, *nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	fscache_stat(&fscache_n_retrievals);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	if (hlist_empty(&cookie->backing_objects))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 		goto nobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		_leave(" = -ENOBUFS [invalidating]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	ASSERTCMP(*nr_pages, >, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	ASSERT(!list_empty(pages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	if (fscache_wait_for_deferred_lookup(cookie) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	if (!op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	atomic_set(&op->n_pages, *nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	spin_lock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	if (!fscache_cookie_enabled(cookie) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	    hlist_empty(&cookie->backing_objects))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		goto nobufs_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	object = hlist_entry(cookie->backing_objects.first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 			     struct fscache_object, cookie_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	__fscache_use_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	atomic_inc(&object->n_reads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	if (fscache_submit_op(object, &op->op) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		goto nobufs_unlock_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	fscache_stat(&fscache_n_retrieval_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	/* we wait for the operation to become active, and then process it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	 * *here*, in this thread, and not in the thread pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	ret = fscache_wait_for_operation_activation(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		object, &op->op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		__fscache_stat(&fscache_n_retrieval_op_waits),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		__fscache_stat(&fscache_n_retrievals_object_dead));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	/* ask the cache to honour the operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		fscache_stat(&fscache_n_cop_allocate_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		ret = object->cache->ops->allocate_pages(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			op, pages, nr_pages, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		fscache_stat_d(&fscache_n_cop_allocate_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		ret = object->cache->ops->read_or_alloc_pages(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 			op, pages, nr_pages, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	if (ret == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		fscache_stat(&fscache_n_retrievals_nomem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	else if (ret == -ERESTARTSYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		fscache_stat(&fscache_n_retrievals_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	else if (ret == -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		fscache_stat(&fscache_n_retrievals_nodata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		fscache_stat(&fscache_n_retrievals_nobufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		fscache_stat(&fscache_n_retrievals_ok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	fscache_put_retrieval(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	_leave(" = %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) nobufs_unlock_dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	atomic_dec(&object->n_reads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	wake_cookie = __fscache_unuse_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) nobufs_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	fscache_put_retrieval(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	if (wake_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		__fscache_wake_unused_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) nobufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	fscache_stat(&fscache_n_retrievals_nobufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	_leave(" = -ENOBUFS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668)  * allocate a block in the cache on which to store a page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669)  * - we return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670)  *   -ENOMEM	- out of memory, nothing done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671)  *   -ERESTARTSYS - interrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672)  *   -ENOBUFS	- no backing object available in which to cache the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673)  *   0		- block allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) int __fscache_alloc_page(struct fscache_cookie *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 			 struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 			 gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	struct fscache_retrieval *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	struct fscache_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	bool wake_cookie = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	_enter("%p,%p,,,", cookie, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	fscache_stat(&fscache_n_allocs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	if (hlist_empty(&cookie->backing_objects))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		goto nobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	ASSERTCMP(page, !=, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		_leave(" = -ENOBUFS [invalidating]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (fscache_wait_for_deferred_lookup(cookie) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	if (!op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	atomic_set(&op->n_pages, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	spin_lock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	if (!fscache_cookie_enabled(cookie) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	    hlist_empty(&cookie->backing_objects))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		goto nobufs_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	object = hlist_entry(cookie->backing_objects.first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			     struct fscache_object, cookie_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	__fscache_use_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	if (fscache_submit_op(object, &op->op) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		goto nobufs_unlock_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	fscache_stat(&fscache_n_alloc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	ret = fscache_wait_for_operation_activation(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		object, &op->op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		__fscache_stat(&fscache_n_alloc_op_waits),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		__fscache_stat(&fscache_n_allocs_object_dead));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	/* ask the cache to honour the operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	fscache_stat(&fscache_n_cop_allocate_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	ret = object->cache->ops->allocate_page(op, page, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	fscache_stat_d(&fscache_n_cop_allocate_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (ret == -ERESTARTSYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		fscache_stat(&fscache_n_allocs_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		fscache_stat(&fscache_n_allocs_nobufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		fscache_stat(&fscache_n_allocs_ok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	fscache_put_retrieval(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	_leave(" = %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) nobufs_unlock_dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	wake_cookie = __fscache_unuse_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) nobufs_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	fscache_put_retrieval(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	if (wake_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		__fscache_wake_unused_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) nobufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	fscache_stat(&fscache_n_allocs_nobufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	_leave(" = -ENOBUFS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) EXPORT_SYMBOL(__fscache_alloc_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  * Unmark pages allocated in the readahead code path (via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763)  * fscache_read_or_alloc_pages()) after delegating to the base filesystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) void __fscache_readpages_cancel(struct fscache_cookie *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 				struct list_head *pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	list_for_each_entry(page, pages, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		if (PageFsCache(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			__fscache_uncache_page(cookie, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) EXPORT_SYMBOL(__fscache_readpages_cancel);
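
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * in a netfs ->readpages() implementation, pages that were marked by
 * fscache_read_or_alloc_pages() but that the netfs then decides not to read
 * after all must be unmarked again through the wrapper for the function
 * above.  The abandonment condition below is a hypothetical placeholder for
 * whatever error handling the netfs does:
 *
 *	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *					  my_end_io, context, GFP_KERNEL);
 *	if (abandoning_remaining_pages)		// hypothetical condition
 *		fscache_readpages_cancel(cookie, pages);
 */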
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  * release a write op reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) static void fscache_release_write_op(struct fscache_operation *_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	_enter("{OP%x}", _op->debug_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  * perform the background storage of a page into the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) static void fscache_write_op(struct fscache_operation *_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	struct fscache_storage *op =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		container_of(_op, struct fscache_storage, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	struct fscache_object *object = op->op.object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	struct fscache_cookie *cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	unsigned n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	void *results[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	spin_lock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	cookie = object->cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	if (!fscache_object_is_active(object)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		/* If we get here, then the on-disk cache object likely no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		 * longer exists, so we should just cancel this write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		 * operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		spin_unlock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		fscache_op_complete(&op->op, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		_leave(" [inactive]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (!cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		/* If we get here, then the cookie belonging to the object was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		 * detached, probably by the cookie being withdrawn due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		 * memory pressure, which means that the netfs pages from which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		 * we might write to the cache no longer exist - therefore, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		 * can just cancel this write operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		spin_unlock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		fscache_op_complete(&op->op, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		       _op->flags, _op->state, object->state->short_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		       object->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	spin_lock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	fscache_stat(&fscache_n_store_calls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	/* find a page to store */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	results[0] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 				       FSCACHE_COOKIE_PENDING_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	if (n != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		goto superseded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	page = results[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	_debug("gang %d [%lx]", n, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	radix_tree_tag_set(&cookie->stores, page->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			   FSCACHE_COOKIE_STORING_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	radix_tree_tag_clear(&cookie->stores, page->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			     FSCACHE_COOKIE_PENDING_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	trace_fscache_page(cookie, page, fscache_page_radix_pend2store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	spin_unlock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	if (page->index >= op->store_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		goto discard_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	fscache_stat(&fscache_n_store_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	fscache_stat(&fscache_n_cop_write_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	ret = object->cache->ops->write_page(op, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	fscache_stat_d(&fscache_n_cop_write_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	trace_fscache_wrote_page(cookie, page, &op->op, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	fscache_end_page_write(object, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		fscache_abort_object(object);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		fscache_op_complete(&op->op, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		fscache_enqueue_operation(&op->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	_leave("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) discard_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	fscache_stat(&fscache_n_store_pages_over_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	fscache_end_page_write(object, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) superseded:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	/* this writer is going away and there aren't any more things to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	 * write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	_debug("cease");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	spin_unlock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	fscache_op_complete(&op->op, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	_leave("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * Clear the pages pending writing for invalidation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) void fscache_invalidate_writes(struct fscache_cookie *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	void *results[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	int n, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	_enter("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		spin_lock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 					       ARRAY_SIZE(results),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 					       FSCACHE_COOKIE_PENDING_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		if (n == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		for (i = n - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			page = results[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			radix_tree_delete(&cookie->stores, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			trace_fscache_page(cookie, page, fscache_page_radix_delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			trace_fscache_page(cookie, page, fscache_page_inval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		for (i = n - 1; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			put_page(results[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	wake_up_bit(&cookie->flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	trace_fscache_wake_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	_leave("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * request a page be stored in the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * - returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  *   -ENOMEM	- out of memory, nothing done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  *   -ENOBUFS	- no backing object available in which to cache the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  *   0		- dispatched a write - it'll call end_io_func() when finished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * if the cookie still has a backing object at this point, that object can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  * in one of a few states with respect to storage processing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  *      set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  *	(a) no writes yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  *	(b) writes deferred till post-creation (mark page for writing and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947)  *	    return immediately)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  *  (2) negative lookup, object created, initial fill being made from netfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  *	(a) fill point not yet reached this page (mark page for writing and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  *          return)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  *	(b) fill point passed this page (queue op to store this page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  *  (3) object extant (queue op to store this page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  * any other state is invalid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) int __fscache_write_page(struct fscache_cookie *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			 struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			 loff_t object_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			 gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	struct fscache_storage *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	struct fscache_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	bool wake_cookie = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	_enter("%p,%x,", cookie, (u32) page->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	ASSERT(PageFsCache(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	fscache_stat(&fscache_n_stores);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		_leave(" = -ENOBUFS [invalidating]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	if (!op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	fscache_operation_init(cookie, &op->op, fscache_write_op, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			       fscache_release_write_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	op->op.flags = FSCACHE_OP_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		(1 << FSCACHE_OP_WAITING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		(1 << FSCACHE_OP_UNUSE_COOKIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		goto nomem_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	ret = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	spin_lock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	if (!fscache_cookie_enabled(cookie) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	    hlist_empty(&cookie->backing_objects))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		goto nobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	object = hlist_entry(cookie->backing_objects.first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			     struct fscache_object, cookie_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		goto nobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	trace_fscache_page(cookie, page, fscache_page_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	/* add the page to the pending-storage radix tree on the backing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	 * object */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	spin_lock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	if (object->store_limit_l != object_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		fscache_set_store_limit(object, object_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	spin_lock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	_debug("store limit %llx", (unsigned long long) object->store_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	ret = radix_tree_insert(&cookie->stores, page->index, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		if (ret == -EEXIST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			goto already_queued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		_debug("insert failed %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		goto nobufs_unlock_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	trace_fscache_page(cookie, page, fscache_page_radix_insert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	radix_tree_tag_set(&cookie->stores, page->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			   FSCACHE_COOKIE_PENDING_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	/* we only want one writer at a time, but we do need to queue new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	 * writers after exclusive ops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		goto already_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	spin_unlock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	op->store_limit = object->store_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	__fscache_use_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	if (fscache_submit_op(object, &op->op) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		goto submit_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	radix_tree_preload_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	fscache_stat(&fscache_n_store_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	fscache_stat(&fscache_n_stores_ok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	/* the work queue now carries its own ref on the object */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	fscache_put_operation(&op->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	_leave(" = 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) already_queued:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	fscache_stat(&fscache_n_stores_again);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) already_pending:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	spin_unlock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	radix_tree_preload_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	fscache_put_operation(&op->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	fscache_stat(&fscache_n_stores_ok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	_leave(" = 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) submit_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	spin_lock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	radix_tree_delete(&cookie->stores, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	trace_fscache_page(cookie, page, fscache_page_radix_delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	wake_cookie = __fscache_unuse_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	ret = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	goto nobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) nobufs_unlock_obj:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	spin_unlock(&cookie->stores_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	spin_unlock(&object->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) nobufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	radix_tree_preload_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	fscache_put_operation(&op->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	if (wake_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		__fscache_wake_unused_cookie(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	fscache_stat(&fscache_n_stores_nobufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	_leave(" = -ENOBUFS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) nomem_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	fscache_put_operation(&op->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	fscache_stat(&fscache_n_stores_oom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	_leave(" = -ENOMEM");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) EXPORT_SYMBOL(__fscache_write_page);
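
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a netfs typically reaches the function above through the
 * fscache_write_page() wrapper once a page is already marked PG_fscache,
 * for example when copying freshly read or written data into the cache, and
 * must uncache the page itself if the store cannot be dispatched:
 *
 *	ret = fscache_write_page(cookie, page, i_size_read(inode), GFP_KERNEL);
 *	if (ret != 0)
 *		fscache_uncache_page(cookie, page);
 */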
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  * remove a page from the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	struct fscache_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	_enter(",%p", page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	ASSERTCMP(page, !=, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	fscache_stat(&fscache_n_uncaches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	/* cache withdrawal may beat us to it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	if (!PageFsCache(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	trace_fscache_page(cookie, page, fscache_page_uncache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	/* get the object */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	spin_lock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	if (hlist_empty(&cookie->backing_objects)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		ClearPageFsCache(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		goto done_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	object = hlist_entry(cookie->backing_objects.first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 			     struct fscache_object, cookie_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	/* there might now be stuff on disk we could read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	/* only invoke the cache backend if we managed to mark the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	 * uncached here; this deals with synchronisation vs withdrawal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	if (TestClearPageFsCache(page) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	    object->cache->ops->uncache_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		/* the cache backend releases the cookie lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		fscache_stat(&fscache_n_cop_uncache_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		object->cache->ops->uncache_page(object, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		fscache_stat_d(&fscache_n_cop_uncache_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) done_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	spin_unlock(&cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	_leave("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) EXPORT_SYMBOL(__fscache_uncache_page);
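
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a netfs normally pairs the uncache with a wait in its page-invalidation
 * paths so that a page still being copied to the cache isn't torn down under
 * the writer - the same pattern that __fscache_uncache_all_inode_pages()
 * below applies to a whole inode:
 *
 *	if (PageFsCache(page)) {
 *		fscache_wait_on_page_write(cookie, page);
 *		fscache_uncache_page(cookie, page);
 *	}
 */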
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)  * fscache_mark_page_cached - Mark a page as being cached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)  * @op: The retrieval op pages are being marked for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)  * @page: The page to be marked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  * Mark a netfs page as being cached.  After this is called, the netfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  * must call fscache_uncache_page() to remove the mark.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	struct fscache_cookie *cookie = op->op.object->cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) #ifdef CONFIG_FSCACHE_STATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	atomic_inc(&fscache_n_marks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	trace_fscache_page(cookie, page, fscache_page_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	_debug("- mark %p{%lx}", page, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (TestSetPageFsCache(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		static bool once_only;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		if (!once_only) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			once_only = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 			pr_warn("Cookie type %s marked page %lx multiple times\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 				cookie->def->name, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	if (cookie->def->mark_page_cached)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		cookie->def->mark_page_cached(cookie->netfs_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 					      op->mapping, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) EXPORT_SYMBOL(fscache_mark_page_cached);
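
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a cache backend might call this from its read-completion path for each
 * netfs page it has filled from backing storage, before completing the page
 * through the retrieval op:
 *
 *	fscache_mark_page_cached(op, page);
 *	fscache_end_io(op, page, 0);
 */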
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)  * fscache_mark_pages_cached - Mark pages as being cached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)  * @op: The retrieval op pages are being marked for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)  * @pagevec: The pages to be marked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)  * Mark a bunch of netfs pages as being cached.  After this is called,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)  * the netfs must call fscache_uncache_page() to remove the mark.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) void fscache_mark_pages_cached(struct fscache_retrieval *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			       struct pagevec *pagevec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	unsigned long loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	for (loop = 0; loop < pagevec->nr; loop++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		fscache_mark_page_cached(op, pagevec->pages[loop]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	pagevec_reinit(pagevec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) EXPORT_SYMBOL(fscache_mark_pages_cached);
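
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a cache backend servicing a multi-page retrieval might batch the marking
 * with a pagevec instead of marking page by page; fscache_mark_pages_cached()
 * reinitialises the pagevec after use, so it can simply be refilled:
 *
 *	pagevec_init(&pvec);
 *	list_for_each_entry(page, pages, lru) {
 *		if (!pagevec_add(&pvec, page))
 *			fscache_mark_pages_cached(op, &pvec);
 *	}
 *	if (pagevec_count(&pvec))
 *		fscache_mark_pages_cached(op, &pvec);
 */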
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  * Uncache all the pages in an inode that are marked PG_fscache, assuming them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  * to be associated with the given cookie.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 				       struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	struct pagevec pvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	pgoff_t next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	_enter("%p,%p", cookie, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	if (!mapping || mapping->nrpages == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		_leave(" [no pages]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	pagevec_init(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		if (!pagevec_lookup(&pvec, mapping, &next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		for (i = 0; i < pagevec_count(&pvec); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			struct page *page = pvec.pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 			if (PageFsCache(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 				__fscache_wait_on_page_write(cookie, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 				__fscache_uncache_page(cookie, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		pagevec_release(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	} while (next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	_leave("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
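
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a netfs calls the function above through the
 * fscache_uncache_all_inode_pages() wrapper when it is about to drop or
 * retire an inode's cookie while cached pages may still be in flight;
 * my_inode_cookie() is a hypothetical per-netfs accessor and the retire
 * decision is the netfs's own:
 *
 *	fscache_uncache_all_inode_pages(my_inode_cookie(inode), inode);
 *	fscache_relinquish_cookie(my_inode_cookie(inode), NULL, true);
 */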