Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * net/sunrpc/cache.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Generic code for various authentication-related caches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * used by sunrpc clients and servers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/kmod.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/ctype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/string_helpers.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/proc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <asm/ioctls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/sunrpc/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/sunrpc/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/sunrpc/stats.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/sunrpc/rpc_pipe_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <trace/events/sunrpc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include "netns.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #define	 RPCDBG_FACILITY RPCDBG_CACHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) static void cache_revisit_request(struct cache_head *item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) static void cache_init(struct cache_head *h, struct cache_detail *detail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 	time64_t now = seconds_since_boot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 	INIT_HLIST_NODE(&h->cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 	h->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 	kref_init(&h->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 	h->expiry_time = now + CACHE_NEW_EXPIRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 	if (now <= detail->flush_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 		/* ensure it isn't already expired */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 		now = detail->flush_time + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 	h->last_refresh = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) static void cache_fresh_unlocked(struct cache_head *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 				struct cache_detail *detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 						struct cache_head *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 						int hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	struct hlist_head *head = &detail->hash_table[hash];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	struct cache_head *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	hlist_for_each_entry_rcu(tmp, head, cache_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 		if (!detail->match(tmp, key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 		if (test_bit(CACHE_VALID, &tmp->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 		    cache_is_expired(detail, tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 		tmp = cache_get_rcu(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 		return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 					    struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	/* Must be called under cd->hash_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	hlist_del_init_rcu(&ch->cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	set_bit(CACHE_CLEANED, &ch->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	cd->entries--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 					  struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	cache_fresh_unlocked(ch, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	cache_put(ch, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 						 struct cache_head *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 						 int hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	struct cache_head *new, *tmp, *freeme = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	struct hlist_head *head = &detail->hash_table[hash];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	new = detail->alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	/* must fully initialise 'new', else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	 * we might get into trouble if we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	 * cache_put it soon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	cache_init(new, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	detail->init(new, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	spin_lock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	/* check if entry appeared while we slept */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	hlist_for_each_entry_rcu(tmp, head, cache_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 				 lockdep_is_held(&detail->hash_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 		if (!detail->match(tmp, key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 		if (test_bit(CACHE_VALID, &tmp->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 		    cache_is_expired(detail, tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 			sunrpc_begin_cache_remove_entry(tmp, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 			trace_cache_entry_expired(detail, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 			freeme = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 		cache_get(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 		spin_unlock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 		cache_put(new, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 		return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	hlist_add_head_rcu(&new->cache_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	detail->entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	cache_get(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	spin_unlock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	if (freeme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 		sunrpc_end_cache_remove_entry(freeme, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 					   struct cache_head *key, int hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	struct cache_head *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	ret = sunrpc_cache_find_rcu(detail, key, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	/* Didn't find anything, insert an empty entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	return sunrpc_cache_add_entry(detail, key, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 			       struct cache_detail *detail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	time64_t now = seconds_since_boot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	if (now <= detail->flush_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 		/* ensure it isn't immediately treated as expired */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 		now = detail->flush_time + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	head->expiry_time = expiry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	head->last_refresh = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	set_bit(CACHE_VALID, &head->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) static void cache_fresh_unlocked(struct cache_head *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 				 struct cache_detail *detail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 		cache_revisit_request(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 		cache_dequeue(detail, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) static void cache_make_negative(struct cache_detail *detail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 				struct cache_head *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	set_bit(CACHE_NEGATIVE, &h->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	trace_cache_entry_make_negative(detail, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) static void cache_entry_update(struct cache_detail *detail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 			       struct cache_head *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 			       struct cache_head *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 		detail->update(h, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 		trace_cache_entry_update(detail, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 		cache_make_negative(detail, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 				       struct cache_head *new, struct cache_head *old, int hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	/* The 'old' entry is to be replaced by 'new'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	 * If 'old' is not VALID, we update it directly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	 * otherwise we need to replace it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	struct cache_head *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	if (!test_bit(CACHE_VALID, &old->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		spin_lock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 		if (!test_bit(CACHE_VALID, &old->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 			cache_entry_update(detail, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 			cache_fresh_locked(old, new->expiry_time, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 			spin_unlock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 			cache_fresh_unlocked(old, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 			return old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 		spin_unlock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	/* We need to insert a new entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	tmp = detail->alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	if (!tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 		cache_put(old, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	cache_init(tmp, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	detail->init(tmp, old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	spin_lock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	cache_entry_update(detail, tmp, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	detail->entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	cache_get(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	cache_fresh_locked(tmp, new->expiry_time, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	cache_fresh_locked(old, 0, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	spin_unlock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	cache_fresh_unlocked(tmp, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	cache_fresh_unlocked(old, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	cache_put(old, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) EXPORT_SYMBOL_GPL(sunrpc_cache_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 
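/*
 * Illustrative sketch, not part of the original file: the usual shape of
 * a sunrpc_cache_update() caller.  "struct my_entry", "my_cache" and
 * "my_hash()" are hypothetical stand-ins; real users such as the svcauth
 * ip_map cache follow the same pattern.  The caller's reference on 'old'
 * is always consumed, and a reference is held on the returned entry.
 */
struct my_entry {
	struct cache_head	h;
	/* ... cached data ... */
};

static struct my_entry *my_entry_update(struct cache_detail *my_cache,
					struct my_entry *new,
					struct my_entry *old)
{
	struct cache_head *ch;

	ch = sunrpc_cache_update(my_cache, &new->h, &old->h,
				 my_hash(&old->h));
	if (!ch)
		return NULL;	/* allocation failed; 'old' was already put */
	return container_of(ch, struct my_entry, h);
}
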
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) static inline int cache_is_valid(struct cache_head *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	if (!test_bit(CACHE_VALID, &h->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 		/* entry is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 		if (test_bit(CACHE_NEGATIVE, &h->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 			return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 			 * In combination with write barrier in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 			 * sunrpc_cache_update, ensures that anyone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 			 * using the cache entry after this sees the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 			 * updated contents:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 			smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	spin_lock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	rv = cache_is_valid(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	if (rv == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 		cache_make_negative(detail, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 				   detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 		rv = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	spin_unlock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	cache_fresh_unlocked(h, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283)  * This is the generic cache management routine for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284)  * the authentication caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285)  * It checks the currency of a cache item and will (later)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286)  * initiate an upcall to fill it if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289)  * Returns 0 if the cache_head can be used, or cache_puts it and returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290)  * -EAGAIN if upcall is pending and request has been queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291)  * -ETIMEDOUT if upcall failed or request could not be queued or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292)  *           upcall completed but item is still invalid (implying that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293)  *           the cache item has been replaced with a newer one).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294)  * -ENOENT if cache entry was negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) int cache_check(struct cache_detail *detail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 		    struct cache_head *h, struct cache_req *rqstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	time64_t refresh_age, age;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	/* First decide return status as best we can */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	rv = cache_is_valid(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	/* now see if we want to start an upcall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	refresh_age = (h->expiry_time - h->last_refresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	age = seconds_since_boot() - h->last_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	if (rqstp == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 		if (rv == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 			rv = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	} else if (rv == -EAGAIN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 		   (h->expiry_time != 0 && age > refresh_age/2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 		dprintk("RPC:       Want update, refage=%lld, age=%lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 				refresh_age, age);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 		switch (detail->cache_upcall(detail, h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 		case -EINVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 			rv = try_to_negate_entry(detail, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 		case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 			cache_fresh_unlocked(h, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	if (rv == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 		if (!cache_defer_req(rqstp, h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 			 * Request was not deferred; handle it as best
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 			 * we can ourselves:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 			rv = cache_is_valid(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 			if (rv == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 				rv = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 		cache_put(h, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) EXPORT_SYMBOL_GPL(cache_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
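/*
 * Illustrative sketch, not part of the original file: the typical
 * lookup-then-check sequence a cache user runs for each request, using
 * the hypothetical "struct my_entry" and "my_hash()" from the sketch
 * above.  On any non-zero return, cache_check() has already dropped the
 * reference that the lookup took.
 */
static struct my_entry *my_entry_get(struct cache_detail *my_cache,
				     struct cache_req *rqstp,
				     struct my_entry *key)
{
	struct cache_head *ch;
	int err;

	ch = sunrpc_cache_lookup_rcu(my_cache, &key->h, my_hash(&key->h));
	if (!ch)
		return ERR_PTR(-ENOMEM);

	err = cache_check(my_cache, ch, rqstp);
	if (err)
		return ERR_PTR(err);	/* -EAGAIN, -ETIMEDOUT or -ENOENT */
	return container_of(ch, struct my_entry, h);
}
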
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344)  * caches need to be periodically cleaned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345)  * For this we maintain a list of cache_detail and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346)  * a current pointer into that list and into the table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347)  * for that entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349)  * Each time cache_clean is called it finds the next non-empty entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350)  * in the current table and walks the list in that entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351)  * looking for entries that can be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353)  * An entry gets removed if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354)  * - The expiry is before current time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355)  * - The last_refresh time is before the flush_time for that cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357)  * later we might drop old entries with non-NEVER expiry if that table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358)  * is getting 'full' for some definition of 'full'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360)  * The question of "how often to scan a table" is an interesting one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361)  * and is answered in part by the use of the "nextcheck" field in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362)  * cache_detail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  * When a scan of a table begins, the nextcheck field is set to a time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364)  * that is well into the future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365)  * While scanning, if an expiry time is found that is earlier than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366)  * current nextcheck time, nextcheck is set to that expiry time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367)  * If the flush_time is ever set to a time earlier than the nextcheck
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368)  * time, the nextcheck time is then set to that flush_time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370)  * A table is then only scanned if the current time is at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371)  * the nextcheck time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) static LIST_HEAD(cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) static DEFINE_SPINLOCK(cache_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) static struct cache_detail *current_detail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) static int current_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) static void do_cache_clean(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) static struct delayed_work cache_cleaner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) void sunrpc_init_cache_detail(struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	spin_lock_init(&cd->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	INIT_LIST_HEAD(&cd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	spin_lock(&cache_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	cd->nextcheck = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	cd->entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	atomic_set(&cd->writers, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	cd->last_close = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	cd->last_warn = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	list_add(&cd->others, &cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	spin_unlock(&cache_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	/* start the cleaning process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
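/*
 * Illustrative sketch, not part of the original file: roughly what a
 * cache owner fills in before calling sunrpc_init_cache_detail().  All
 * of the "my_*" names are hypothetical; in practice most caches are
 * per-network-namespace and are set up through cache_create_net() and
 * cache_register_net(), which also create the procfs channel files.
 */
#define MY_HASH_SIZE	64
static struct hlist_head my_hash_table[MY_HASH_SIZE];

static struct cache_detail my_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= MY_HASH_SIZE,
	.hash_table	= my_hash_table,
	.name		= "my_cache",
	.cache_put	= my_cache_put,		/* kref release for entries */
	.cache_upcall	= my_cache_upcall,	/* usually wraps sunrpc_cache_pipe_upcall() */
	.cache_request	= my_cache_request,	/* format one upcall line */
	.cache_parse	= my_cache_parse,	/* parse one downcall line */
	.match		= my_match,
	.init		= my_init,
	.update		= my_update,
	.alloc		= my_alloc,
};
/* ... and at module init time: sunrpc_init_cache_detail(&my_cache); ... */
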
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) void sunrpc_destroy_cache_detail(struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	cache_purge(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	spin_lock(&cache_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	spin_lock(&cd->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	if (current_detail == cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 		current_detail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	list_del_init(&cd->others);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	spin_unlock(&cd->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	spin_unlock(&cache_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	if (list_empty(&cache_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		/* module must be being unloaded, so it's safe to kill the worker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 		cancel_delayed_work_sync(&cache_cleaner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) /* clean cache tries to find something to clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419)  * and cleans it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420)  * It returns 1 if it cleaned something,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421)  *            0 if it didn't find anything this time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422)  *           -1 if it fell off the end of the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) static int cache_clean(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	struct list_head *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	spin_lock(&cache_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	/* find a suitable table if we don't already have one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	while (current_detail == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	    current_index >= current_detail->hash_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		if (current_detail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 			next = current_detail->others.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 			next = cache_list.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 		if (next == &cache_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 			current_detail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 			spin_unlock(&cache_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		current_detail = list_entry(next, struct cache_detail, others);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		if (current_detail->nextcheck > seconds_since_boot())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 			current_index = current_detail->hash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 			current_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 			current_detail->nextcheck = seconds_since_boot()+30*60;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	/* find a non-empty bucket in the table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	while (current_detail &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	       current_index < current_detail->hash_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	       hlist_empty(&current_detail->hash_table[current_index]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		current_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	if (current_detail && current_index < current_detail->hash_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		struct cache_head *ch = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		struct cache_detail *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		struct hlist_node *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		spin_lock(&current_detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		/* Ok, now to clean this strand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		head = &current_detail->hash_table[current_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 			if (current_detail->nextcheck > ch->expiry_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 				current_detail->nextcheck = ch->expiry_time+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 			if (!cache_is_expired(current_detail, ch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 			sunrpc_begin_cache_remove_entry(ch, current_detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 			trace_cache_entry_expired(current_detail, ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 			rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		spin_unlock(&current_detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		d = current_detail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		if (!ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 			current_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		spin_unlock(&cache_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		if (ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 			sunrpc_end_cache_remove_entry(ch, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		spin_unlock(&cache_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497)  * We want to regularly clean the cache, so we need to schedule some work ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) static void do_cache_clean(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	int delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	if (list_empty(&cache_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	if (cache_clean() == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		delay = round_jiffies_relative(30*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 		delay = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516)  * Clean all caches promptly.  This just calls cache_clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517)  * repeatedly until we are sure that every cache has had a chance to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518)  * be fully cleaned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) void cache_flush(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	while (cache_clean() != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	while (cache_clean() != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) EXPORT_SYMBOL_GPL(cache_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) void cache_purge(struct cache_detail *detail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	struct cache_head *ch = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	struct hlist_head *head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	spin_lock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	if (!detail->entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		spin_unlock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	for (i = 0; i < detail->hash_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		head = &detail->hash_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		while (!hlist_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 			ch = hlist_entry(head->first, struct cache_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 					 cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 			sunrpc_begin_cache_remove_entry(ch, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 			spin_unlock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 			sunrpc_end_cache_remove_entry(ch, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 			spin_lock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	spin_unlock(&detail->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) EXPORT_SYMBOL_GPL(cache_purge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559)  * Deferral and Revisiting of Requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561)  * If a cache lookup finds a pending entry, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562)  * need to defer the request and revisit it later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563)  * All deferred requests are stored in a hash table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564)  * indexed by "struct cache_head *".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565)  * As it may be wasteful to store a whole request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566)  * structure, we allow the request to provide a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567)  * deferred form, which must contain a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568)  * 'struct cache_deferred_req'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569)  * This cache_deferred_req contains a method to allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570)  * it to be revisited when cache info is available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) #define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) #define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) #define	DFR_MAX	300	/* ??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) static DEFINE_SPINLOCK(cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) static LIST_HEAD(cache_defer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) static int cache_defer_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) static void __unhash_deferred_req(struct cache_deferred_req *dreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	hlist_del_init(&dreq->hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	if (!list_empty(&dreq->recent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		list_del_init(&dreq->recent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		cache_defer_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	int hash = DFR_HASH(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	INIT_LIST_HEAD(&dreq->recent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) static void setup_deferral(struct cache_deferred_req *dreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 			   struct cache_head *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 			   int count_me)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	dreq->item = item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	spin_lock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	__hash_deferred_req(dreq, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	if (count_me) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		cache_defer_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		list_add(&dreq->recent, &cache_defer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	spin_unlock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) struct thread_deferred_req {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	struct cache_deferred_req handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	struct completion completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	struct thread_deferred_req *dr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		container_of(dreq, struct thread_deferred_req, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	complete(&dr->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) static void cache_wait_req(struct cache_req *req, struct cache_head *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	struct thread_deferred_req sleeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	struct cache_deferred_req *dreq = &sleeper.handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	dreq->revisit = cache_restart_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	setup_deferral(dreq, item, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	if (!test_bit(CACHE_PENDING, &item->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	    wait_for_completion_interruptible_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		    &sleeper.completion, req->thread_wait) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		/* The completion wasn't completed, so we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		 * to clean up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		spin_lock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		if (!hlist_unhashed(&sleeper.handle.hash)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 			__unhash_deferred_req(&sleeper.handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 			spin_unlock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 			/* cache_revisit_request already removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 			 * this from the hash table, but hasn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 			 * called ->revisit yet.  It will very soon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 			 * and we need to wait for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			spin_unlock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			wait_for_completion(&sleeper.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) static void cache_limit_defers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	/* Make sure we haven't exceeded the limit of allowed deferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	 * requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	struct cache_deferred_req *discard = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	if (cache_defer_cnt <= DFR_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	spin_lock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	/* Consider removing either the first or the last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	if (cache_defer_cnt > DFR_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		if (prandom_u32() & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 			discard = list_entry(cache_defer_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 					     struct cache_deferred_req, recent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 			discard = list_entry(cache_defer_list.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 					     struct cache_deferred_req, recent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		__unhash_deferred_req(discard);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	spin_unlock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	if (discard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		discard->revisit(discard, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) /* Return true if and only if a deferred request is queued. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	struct cache_deferred_req *dreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	if (req->thread_wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		cache_wait_req(req, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		if (!test_bit(CACHE_PENDING, &item->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	dreq = req->defer(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	if (dreq == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	setup_deferral(dreq, item, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	if (!test_bit(CACHE_PENDING, &item->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		/* Bit could have been cleared before we managed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		 * set up the deferral, so we need to revisit just in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		cache_revisit_request(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	cache_limit_defers();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) static void cache_revisit_request(struct cache_head *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	struct cache_deferred_req *dreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	struct list_head pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	struct hlist_node *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	int hash = DFR_HASH(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	INIT_LIST_HEAD(&pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	spin_lock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		if (dreq->item == item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 			__unhash_deferred_req(dreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 			list_add(&dreq->recent, &pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	spin_unlock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	while (!list_empty(&pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		list_del_init(&dreq->recent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		dreq->revisit(dreq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) void cache_clean_deferred(void *owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	struct cache_deferred_req *dreq, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	struct list_head pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	INIT_LIST_HEAD(&pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	spin_lock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		if (dreq->owner == owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 			__unhash_deferred_req(dreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			list_add(&dreq->recent, &pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	spin_unlock(&cache_defer_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	while (!list_empty(&pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		list_del_init(&dreq->recent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		dreq->revisit(dreq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765)  * communicate with user-space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767)  * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768)  * On read, you get a full request, or block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769)  * On write, an update request is processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770)  * Poll works if anything to read, and always allows write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772)  * Implemented by linked list of requests.  Each open file has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773)  * a ->private that also exists in this list.  New requests are added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774)  * to the end and may wake up any preceding readers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775)  * New readers are added to the head.  If, on read, an item is found with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776)  * CACHE_UPCALLING clear, we free it from the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  */
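/*
 * Illustrative note: a userspace cache daemon (for example rpc.mountd or
 * rpc.idmapd, depending on the cache) typically poll()s the channel file,
 * read()s one complete request once it becomes readable, resolves it, and
 * then write()s the answer back to the same channel as an update.
 */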
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) static DEFINE_SPINLOCK(queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) static DEFINE_MUTEX(queue_io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) struct cache_queue {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	struct list_head	list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	int			reader;	/* if 0, then request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) struct cache_request {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	struct cache_queue	q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	struct cache_head	*item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	char			*buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	int			len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	int			readers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) struct cache_reader {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	struct cache_queue	q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	int			offset;	/* if non-0, we have a refcnt on next request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) static int cache_request(struct cache_detail *detail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			       struct cache_request *crq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	char *bp = crq->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	int len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	detail->cache_request(detail, crq->item, &bp, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	return PAGE_SIZE - len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			  loff_t *ppos, struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	struct cache_reader *rp = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	struct cache_request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	inode_lock(inode); /* protect against multiple concurrent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			      * readers on this file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	spin_lock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	/* need to find next request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	while (rp->q.list.next != &cd->queue &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	       list_entry(rp->q.list.next, struct cache_queue, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	       ->reader) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		struct list_head *next = rp->q.list.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		list_move(&rp->q.list, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	if (rp->q.list.next == &cd->queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		WARN_ON_ONCE(rp->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	rq = container_of(rp->q.list.next, struct cache_request, q.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	WARN_ON_ONCE(rq->q.reader);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	if (rp->offset == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		rq->readers++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (rq->len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		err = cache_request(cd, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		rq->len = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		spin_lock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		list_move(&rp->q.list, &rq->q.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		if (rp->offset + count > rq->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			count = rq->len - rp->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		if (copy_to_user(buf, rq->buf + rp->offset, count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		rp->offset += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		if (rp->offset >= rq->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			rp->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			spin_lock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			list_move(&rp->q.list, &rq->q.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 			spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (rp->offset == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		/* need to release rq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		spin_lock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		rq->readers--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		if (rq->readers == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			list_del(&rq->q.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			cache_put(rq->item, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			kfree(rq->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			kfree(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	if (err == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	return err ? err : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 				 size_t count, struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	if (copy_from_user(kaddr, buf, count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	kaddr[count] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	ret = cd->cache_parse(cd, kaddr, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		ret = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) static ssize_t cache_slow_downcall(const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 				   size_t count, struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	static char write_buf[32768]; /* protected by queue_io_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	ssize_t ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (count >= sizeof(write_buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	mutex_lock(&queue_io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	ret = cache_do_downcall(write_buf, buf, count, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	mutex_unlock(&queue_io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) static ssize_t cache_downcall(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			      const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			      size_t count, struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	char *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	ssize_t ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	if (count >= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		goto out_slow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	page = find_or_create_page(mapping, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		goto out_slow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	kaddr = kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	ret = cache_do_downcall(kaddr, buf, count, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) out_slow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	return cache_slow_downcall(buf, count, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) static ssize_t cache_write(struct file *filp, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 			   size_t count, loff_t *ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			   struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	struct address_space *mapping = filp->f_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	ssize_t ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (!cd->cache_parse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	ret = cache_downcall(mapping, buf, count, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) static __poll_t cache_poll(struct file *filp, poll_table *wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			       struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	__poll_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	struct cache_reader *rp = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	struct cache_queue *cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	poll_wait(filp, &queue_wait, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	/* always allow write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	mask = EPOLLOUT | EPOLLWRNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	if (!rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	spin_lock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	for (cq = &rp->q; &cq->list != &cd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	     cq = list_entry(cq->list.next, struct cache_queue, list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		if (!cq->reader) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			mask |= EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) static int cache_ioctl(struct inode *ino, struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		       unsigned int cmd, unsigned long arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		       struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct cache_reader *rp = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	struct cache_queue *cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	if (cmd != FIONREAD || !rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	spin_lock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	/* only find the length remaining in current request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	 * or the length of the next request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	for (cq = &rp->q; &cq->list != &cd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	     cq = list_entry(cq->list.next, struct cache_queue, list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		if (!cq->reader) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			struct cache_request *cr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 				container_of(cq, struct cache_request, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 			len = cr->len - rp->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	return put_user(len, (int __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static int cache_open(struct inode *inode, struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		      struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	struct cache_reader *rp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	if (!cd || !try_module_get(cd->owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	nonseekable_open(inode, filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (filp->f_mode & FMODE_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		if (!rp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			module_put(cd->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		rp->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		rp->q.reader = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		spin_lock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		list_add(&rp->q.list, &cd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	if (filp->f_mode & FMODE_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		atomic_inc(&cd->writers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	filp->private_data = rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static int cache_release(struct inode *inode, struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			 struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	struct cache_reader *rp = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (rp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		spin_lock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		if (rp->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			struct cache_queue *cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			for (cq = &rp->q; &cq->list != &cd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			     cq = list_entry(cq->list.next, struct cache_queue, list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 				if (!cq->reader) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 					container_of(cq, struct cache_request, q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 						->readers--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			rp->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		list_del(&rp->q.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		filp->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		kfree(rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	if (filp->f_mode & FMODE_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		atomic_dec(&cd->writers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		cd->last_close = seconds_since_boot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	module_put(cd->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	struct cache_queue *cq, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	struct cache_request *cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	struct list_head dequeued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	INIT_LIST_HEAD(&dequeued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	spin_lock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		if (!cq->reader) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			cr = container_of(cq, struct cache_request, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			if (cr->item != ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			if (test_bit(CACHE_PENDING, &ch->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 				/* Lost a race and it is pending again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			if (cr->readers != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			list_move(&cr->q.list, &dequeued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	while (!list_empty(&dequeued)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		cr = list_entry(dequeued.next, struct cache_request, q.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		list_del(&cr->q.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		cache_put(cr->item, detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		kfree(cr->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		kfree(cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  * Support routines for text-based upcalls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  * Fields are separated by spaces.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  * Fields are either mangled to quote space, tab, newline and slosh (backslash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  * with a slosh, or hexified with a leading \x.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * Record is terminated with newline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  */
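/*
 * For example, a field containing "a b" is emitted by qword_add() as
 * "a\040b " (the space escaped as octal, plus a trailing field separator)
 * and by qword_addhex() as "\x612062 ".
 */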
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) void qword_add(char **bpp, int *lp, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	char *bp = *bpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	int len = *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	if (len < 0) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (ret >= len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		bp += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		len = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		bp += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		len -= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		*bp++ = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	*bpp = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	*lp = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) EXPORT_SYMBOL_GPL(qword_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) void qword_addhex(char **bpp, int *lp, char *buf, int blen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	char *bp = *bpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	int len = *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	if (len < 0) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	if (len > 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		*bp++ = '\\';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		*bp++ = 'x';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		len -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		while (blen && len >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			bp = hex_byte_pack(bp, *buf++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			len -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			blen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	if (blen || len < 1) len = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		*bp++ = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	*bpp = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	*lp = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) EXPORT_SYMBOL_GPL(qword_addhex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static void warn_no_listener(struct cache_detail *detail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	if (detail->last_warn != detail->last_close) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		detail->last_warn = detail->last_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		if (detail->warn_no_listener)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 			detail->warn_no_listener(detail, detail->last_close != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static bool cache_listeners_exist(struct cache_detail *detail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	if (atomic_read(&detail->writers))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (detail->last_close == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		/* This cache was never opened */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	if (detail->last_close < seconds_since_boot() - 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		 * We allow for the possibility that someone might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		 * restart a userspace daemon without restarting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		 * server; but after 30 seconds, we give up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		 return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)  * register an upcall request to user-space and queue it up for read() by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  * upcall daemon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)  * Each request is at most one page long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	struct cache_request *crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	if (test_bit(CACHE_CLEANED, &h->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		/* Too late to make an upcall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	if (!crq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	crq->q.reader = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	crq->buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	crq->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	crq->readers = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	spin_lock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	if (test_bit(CACHE_PENDING, &h->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		crq->item = cache_get(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		list_add_tail(&crq->q.list, &detail->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		trace_cache_entry_upcall(detail, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		/* Lost a race, no longer PENDING, so don't enqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	spin_unlock(&queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	wake_up(&queue_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		kfree(crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	if (test_and_set_bit(CACHE_PENDING, &h->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	return cache_pipe_upcall(detail, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 				     struct cache_head *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	if (!cache_listeners_exist(detail)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		warn_no_listener(detail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		trace_cache_entry_no_listener(detail, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	return sunrpc_cache_pipe_upcall(detail, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)  * parse a message from user-space and pass it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)  * to an appropriate cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)  * Messages are, like requests, separated into fields by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)  * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)  * Message is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)  *   reply cachename expiry key ... content....
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)  * key and content are both parsed by cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) int qword_get(char **bpp, char *dest, int bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	/* return bytes copied, or -1 on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	char *bp = *bpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	while (*bp == ' ') bp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	if (bp[0] == '\\' && bp[1] == 'x') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		/* HEX STRING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		bp += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		while (len < bufsize - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			int h, l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			h = hex_to_bin(bp[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			if (h < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 			l = hex_to_bin(bp[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			if (l < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 			*dest++ = (h << 4) | l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			bp += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 			len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		/* text with \nnn octal quoting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			if (*bp == '\\' &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 			    isodigit(bp[1]) && (bp[1] <= '3') &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			    isodigit(bp[2]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			    isodigit(bp[3])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 				int byte = (*++bp -'0');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 				bp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 				byte = (byte << 3) | (*bp++ - '0');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 				byte = (byte << 3) | (*bp++ - '0');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 				*dest++ = byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 				len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 				*dest++ = *bp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 				len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	while (*bp == ' ') bp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	*bpp = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	*dest = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) EXPORT_SYMBOL_GPL(qword_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  * support /proc/net/rpc/$CACHENAME/content
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  * as a seqfile.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  * We call ->cache_show passing NULL for the item to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  * get a header, then pass each real item in the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
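/*
 * The seq_file position is split into two 32-bit halves: the upper half is
 * the hash chain index and the lower half is the entry index within that
 * chain; position 0 is reserved for the header token.
 */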
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	loff_t n = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	unsigned int hash, entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	struct cache_head *ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	struct cache_detail *cd = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	if (!n--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		return SEQ_START_TOKEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	hash = n >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	entry = n & ((1LL<<32) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		if (!entry--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			return ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	n &= ~((1LL<<32) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		hash++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		n += 1LL<<32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	} while (hash < cd->hash_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		hlist_empty(&cd->hash_table[hash]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	if (hash >= cd->hash_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	*pos = n+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	return hlist_entry_safe(rcu_dereference_raw(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 				hlist_first_rcu(&cd->hash_table[hash])),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 				struct cache_head, cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	struct cache_head *ch = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	int hash = (*pos >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	struct cache_detail *cd = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	if (p == SEQ_START_TOKEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		hash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	else if (ch->cache_list.next == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		hash++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		*pos += 1LL<<32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		++*pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		return hlist_entry_safe(rcu_dereference_raw(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 					hlist_next_rcu(&ch->cache_list)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 					struct cache_head, cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	*pos &= ~((1LL<<32) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	while (hash < cd->hash_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	       hlist_empty(&cd->hash_table[hash])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		hash++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		*pos += 1LL<<32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	if (hash >= cd->hash_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	++*pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	return hlist_entry_safe(rcu_dereference_raw(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 				hlist_first_rcu(&cd->hash_table[hash])),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 				struct cache_head, cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	__acquires(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	return __cache_seq_start(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	return cache_seq_next(file, p, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) void cache_seq_stop_rcu(struct seq_file *m, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	__releases(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static int c_show(struct seq_file *m, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	struct cache_head *cp = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	struct cache_detail *cd = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	if (p == SEQ_START_TOKEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		return cd->cache_show(m, cd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	ifdebug(CACHE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			   convert_to_wallclock(cp->expiry_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			   kref_read(&cp->ref), cp->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	cache_get(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	if (cache_check(cd, cp, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		/* cache_check does a cache_put on failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		seq_puts(m, "# ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		if (cache_is_expired(cd, cp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 			seq_puts(m, "# ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		cache_put(cp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	return cd->cache_show(m, cd, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) static const struct seq_operations cache_content_op = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	.start	= cache_seq_start_rcu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	.next	= cache_seq_next_rcu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	.stop	= cache_seq_stop_rcu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	.show	= c_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) static int content_open(struct inode *inode, struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	struct seq_file *seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	if (!cd || !try_module_get(cd->owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	err = seq_open(file, &cache_content_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		module_put(cd->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	seq = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	seq->private = cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static int content_release(struct inode *inode, struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	int ret = seq_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	module_put(cd->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) static int open_flush(struct inode *inode, struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	if (!cd || !try_module_get(cd->owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	return nonseekable_open(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) static int release_flush(struct inode *inode, struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	module_put(cd->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) static ssize_t read_flush(struct file *file, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			  size_t count, loff_t *ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			  struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	char tbuf[22];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			convert_to_wallclock(cd->flush_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static ssize_t write_flush(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			   size_t count, loff_t *ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			   struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	char tbuf[20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	char *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	time64_t now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	if (*ppos || count > sizeof(tbuf)-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	if (copy_from_user(tbuf, buf, count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	tbuf[count] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	simple_strtoul(tbuf, &ep, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	if (*ep && *ep != '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	/* Note that while we check that 'buf' holds a valid number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	 * we always ignore the value and just flush everything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	 * Making use of the number leads to races.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	now = seconds_since_boot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	/* Always flush everything, so behave like cache_purge()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	 * Do this by advancing flush_time to the current time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	 * or by one second if it has already reached the current time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	 * Newly added cache entries will always have ->last_refresh greater
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	 * than ->flush_time, so they don't get flushed prematurely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	if (cd->flush_time >= now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		now = cd->flush_time + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	cd->flush_time = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	cd->nextcheck = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	cache_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	if (cd->flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		cd->flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	*ppos += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 				 size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	struct cache_detail *cd = PDE_DATA(file_inode(filp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	return cache_read(filp, buf, count, ppos, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 				  size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	struct cache_detail *cd = PDE_DATA(file_inode(filp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	return cache_write(filp, buf, count, ppos, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	struct cache_detail *cd = PDE_DATA(file_inode(filp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	return cache_poll(filp, wait, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) static long cache_ioctl_procfs(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 			       unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	struct cache_detail *cd = PDE_DATA(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	return cache_ioctl(inode, filp, cmd, arg, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) static int cache_open_procfs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	struct cache_detail *cd = PDE_DATA(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	return cache_open(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) static int cache_release_procfs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	struct cache_detail *cd = PDE_DATA(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	return cache_release(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static const struct proc_ops cache_channel_proc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	.proc_lseek	= no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	.proc_read	= cache_read_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	.proc_write	= cache_write_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	.proc_poll	= cache_poll_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	.proc_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	.proc_open	= cache_open_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	.proc_release	= cache_release_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) static int content_open_procfs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	struct cache_detail *cd = PDE_DATA(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	return content_open(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) static int content_release_procfs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	struct cache_detail *cd = PDE_DATA(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	return content_release(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static const struct proc_ops content_proc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	.proc_open	= content_open_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	.proc_read	= seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	.proc_lseek	= seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	.proc_release	= content_release_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) static int open_flush_procfs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	struct cache_detail *cd = PDE_DATA(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	return open_flush(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) static int release_flush_procfs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	struct cache_detail *cd = PDE_DATA(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	return release_flush(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			    size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	struct cache_detail *cd = PDE_DATA(file_inode(filp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	return read_flush(filp, buf, count, ppos, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) static ssize_t write_flush_procfs(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 				  const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 				  size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	struct cache_detail *cd = PDE_DATA(file_inode(filp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	return write_flush(filp, buf, count, ppos, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) static const struct proc_ops cache_flush_proc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	.proc_open	= open_flush_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	.proc_read	= read_flush_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	.proc_write	= write_flush_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	.proc_release	= release_flush_procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	.proc_lseek	= no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static void remove_cache_proc_entries(struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	if (cd->procfs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		proc_remove(cd->procfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		cd->procfs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
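/*
 * With CONFIG_PROC_FS enabled, create_cache_proc_entries() below builds a
 * per-cache directory under /proc/net/rpc/<cache-name>/ containing:
 *   flush   - read/write the flush time (cache_flush_proc_ops)
 *   channel - the upcall/downcall channel, created only if the cache has
 *             a cache_request or cache_parse method
 *   content - a seq_file dump of all entries, created only if cache_show
 *             is provided
 */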
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	struct proc_dir_entry *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	struct sunrpc_net *sn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	sn = net_generic(net, sunrpc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	if (cd->procfs == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		goto out_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	p = proc_create_data("flush", S_IFREG | 0600,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			     cd->procfs, &cache_flush_proc_ops, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	if (p == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		goto out_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	if (cd->cache_request || cd->cache_parse) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 				     &cache_channel_proc_ops, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		if (p == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			goto out_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	if (cd->cache_show) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 				     &content_proc_ops, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		if (p == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			goto out_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) out_nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	remove_cache_proc_entries(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) #else /* CONFIG_PROC_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) void __init cache_initialize(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) int cache_register_net(struct cache_detail *cd, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	sunrpc_init_cache_detail(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	ret = create_cache_proc_entries(cd, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		sunrpc_destroy_cache_detail(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) EXPORT_SYMBOL_GPL(cache_register_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) void cache_unregister_net(struct cache_detail *cd, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	remove_cache_proc_entries(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	sunrpc_destroy_cache_detail(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) EXPORT_SYMBOL_GPL(cache_unregister_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	struct cache_detail *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	if (cd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 				 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	if (cd->hash_table == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		kfree(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	for (i = 0; i < cd->hash_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		INIT_HLIST_HEAD(&cd->hash_table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	cd->net = net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	return cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) EXPORT_SYMBOL_GPL(cache_create_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) void cache_destroy_net(struct cache_detail *cd, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	kfree(cd->hash_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	kfree(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) EXPORT_SYMBOL_GPL(cache_destroy_net);
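/*
 * Illustrative per-netns usage sketch (not part of the original file;
 * "my_cache_template" stands for a caller-defined struct cache_detail,
 * as used by e.g. the nfsd export and auth caches):
 *
 *	struct cache_detail *cd;
 *	int err;
 *
 *	cd = cache_create_net(&my_cache_template, net);
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);
 *	if (err)
 *		cache_destroy_net(cd, net);
 *
 * On teardown the caller undoes both steps:
 *
 *	cache_unregister_net(cd, net);
 *	cache_destroy_net(cd, net);
 */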
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
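/*
 * The *_pipefs handlers below mirror the procfs handlers above; the only
 * difference is that the cache_detail is looked up from the rpc_pipefs
 * inode (RPC_I(inode)->private) rather than from PDE_DATA().
 */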
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 				 size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	return cache_read(filp, buf, count, ppos, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 				  size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	return cache_write(filp, buf, count, ppos, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	return cache_poll(filp, wait, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) static long cache_ioctl_pipefs(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 			      unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	struct cache_detail *cd = RPC_I(inode)->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	return cache_ioctl(inode, filp, cmd, arg, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) static int cache_open_pipefs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	struct cache_detail *cd = RPC_I(inode)->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	return cache_open(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) static int cache_release_pipefs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	struct cache_detail *cd = RPC_I(inode)->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	return cache_release(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) const struct file_operations cache_file_operations_pipefs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	.llseek		= no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	.read		= cache_read_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	.write		= cache_write_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	.poll		= cache_poll_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	.open		= cache_open_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	.release	= cache_release_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) static int content_open_pipefs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	struct cache_detail *cd = RPC_I(inode)->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	return content_open(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) static int content_release_pipefs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	struct cache_detail *cd = RPC_I(inode)->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	return content_release(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) const struct file_operations content_file_operations_pipefs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	.open		= content_open_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	.read		= seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	.llseek		= seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	.release	= content_release_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) static int open_flush_pipefs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	struct cache_detail *cd = RPC_I(inode)->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	return open_flush(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) static int release_flush_pipefs(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	struct cache_detail *cd = RPC_I(inode)->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	return release_flush(inode, filp, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			    size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	return read_flush(filp, buf, count, ppos, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) static ssize_t write_flush_pipefs(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 				  const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 				  size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	return write_flush(filp, buf, count, ppos, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) const struct file_operations cache_flush_operations_pipefs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	.open		= open_flush_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	.read		= read_flush_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	.write		= write_flush_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	.release	= release_flush_pipefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	.llseek		= no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) int sunrpc_cache_register_pipefs(struct dentry *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 				 const char *name, umode_t umode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 				 struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	if (IS_ERR(dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		return PTR_ERR(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	cd->pipefs = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	if (cd->pipefs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		rpc_remove_cache_dir(cd->pipefs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		cd->pipefs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
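/*
 * sunrpc_cache_unhash - remove @h from @cd's hash table if it is still
 * hashed.  The entry is unlinked and marked CACHE_CLEANED under the hash
 * lock; the table's reference is dropped (and any pending upcall cleaned
 * up) only after the lock has been released.
 */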
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	spin_lock(&cd->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	if (!hlist_unhashed(&h->cache_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		sunrpc_begin_cache_remove_entry(h, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		spin_unlock(&cd->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		sunrpc_end_cache_remove_entry(h, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		spin_unlock(&cd->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);