Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

net/sunrpc/sched.c (blame: every line last modified in commit 8f3ce5b39 by kx, 2023-10-28 12:00:06 +0300)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void			rpc_async_schedule(struct work_struct *);
static void			rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;

		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;

	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}
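
/*
 * Note: each wait queue keeps a single delayed work item armed for the
 * earliest pending expiry. mod_delayed_work() takes a relative delay,
 * so the absolute jiffies value in @expires is converted to a delta
 * above; an expiry already in the past becomes a delay of 0, firing
 * the timer function immediately.
 */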

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) ||
	    time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}
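
/*
 * The (next = queue head, prev = NULL) pair written above is a
 * sentinel, not a valid list node: it marks a task that is linked only
 * through another task's u.tk_wait.links list. __rpc_list_dequeue_task()
 * below tests .prev == NULL to tell the two cases apart, and uses the
 * cached .next pointer to promote the first queued follower onto the
 * main queue when the group leader is removed.
 */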

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}
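
/*
 * The active bit is cleared and the reference count dropped under
 * wq->lock, so a waiter serialized on the same lock (see
 * __rpc_wait_for_completion_task() below) cannot miss the wakeup.
 * A true return value means this call dropped the last reference and
 * the caller is expected to free the task.
 */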

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
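
/*
 * Most callers reach this through the rpc_wait_for_completion_task()
 * wrapper in <linux/sunrpc/sched.h>, which passes a NULL @action and
 * therefore gets the default killable, freezer-aware wait above.
 */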

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
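
/*
 * The two wakeup paths mirror how a task sleeps: an async task is
 * driven by the workqueue, so it is simply requeued as a work item,
 * while a synchronous task sleeps in __rpc_execute() on the
 * RPC_TASK_QUEUED bit and is woken via wake_up_bit().
 */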

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	__rpc_do_sleep_on_priority(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	if (time_is_after_jiffies(timeout)) {
		__rpc_do_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
					  struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
						   task, NULL, NULL);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service the privileged queue.
	 */
	q = &queue->tasks[RPC_NR_PRIORITY - 1];
	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && queue->nr) {
		queue->nr--;
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}
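
/*
 * Summary of the dequeue policy above: privileged tasks are always
 * served first; otherwise up to queue->nr tasks (1 << priority, set by
 * rpc_set_waitqueue_priority()) are taken from the current priority
 * level before the scan moves on to the next non-empty level, wrapping
 * from tasks[0] back to tasks[maxpriority] so that lower levels are
 * not starved indefinitely.
 */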

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task,
				u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up_locked - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 */
static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_locked(queue, task);
	}
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	spin_lock(&queue->lock);
	rpc_wake_up_locked(queue);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 */
static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	}
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	spin_lock(&queue->lock);
	rpc_wake_up_status_locked(queue, status);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			trace_rpc_task_timeout(task, task->tk_action);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}
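
/*
 * The timer list is kept unsorted: the single pass above wakes every
 * expired task with -ETIMEDOUT and, as a side effect, recomputes the
 * earliest remaining expiry so the queue's delayed work can be
 * re-armed for exactly that time.
 */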

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793)  * Helper to call task->tk_ops->rpc_call_prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) void rpc_prepare_task(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) rpc_init_task_statistics(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	/* Initialize retry counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	task->tk_garb_retry = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	task->tk_cred_retry = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	task->tk_rebind_retry = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	/* starting timestamp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	task->tk_start = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) rpc_reset_task_statistics(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	task->tk_timeouts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	rpc_init_task_statistics(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  * Helper that calls task->tk_ops->rpc_call_done if it exists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) void rpc_exit_task(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	trace_rpc_task_end(task, task->tk_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	task->tk_action = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	if (task->tk_ops->rpc_count_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	else if (task->tk_client)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		rpc_count_iostats(task, task->tk_client->cl_metrics);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	if (task->tk_ops->rpc_call_done != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		task->tk_ops->rpc_call_done(task, task->tk_calldata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		if (task->tk_action != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			/* Always release the RPC slot and buffer memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			xprt_release(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			rpc_reset_task_statistics(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) void rpc_signal_task(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	struct rpc_wait_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (!RPC_IS_ACTIVATED(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	trace_rpc_task_signalled(task, task->tk_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	queue = READ_ONCE(task->tk_waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) void rpc_exit(struct rpc_task *task, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	task->tk_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	task->tk_action = rpc_exit_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	rpc_wake_up_queued_task(task->tk_waitqueue, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) EXPORT_SYMBOL_GPL(rpc_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	if (ops->rpc_release != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		ops->rpc_release(calldata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  * This is the RPC `scheduler' (or rather, the finite state machine).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) static void __rpc_execute(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	struct rpc_wait_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	int task_is_async = RPC_IS_ASYNC(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	WARN_ON_ONCE(RPC_IS_QUEUED(task));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (RPC_IS_QUEUED(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		void (*do_action)(struct rpc_task *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		 * Perform the next FSM step or a pending callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		 * tk_action may be NULL if the task has been killed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		 * In particular, note that rpc_killall_tasks may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		 * do this at any time, so beware when dereferencing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		do_action = task->tk_action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		if (task->tk_callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			do_action = task->tk_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			task->tk_callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		if (!do_action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		trace_rpc_task_run_action(task, do_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		do_action(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		 * Lockless check for whether task is sleeping or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		if (!RPC_IS_QUEUED(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		 * Signalled tasks should exit rather than sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		if (RPC_SIGNALLED(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			task->tk_rpc_status = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			rpc_exit(task, -ERESTARTSYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		 * The queue->lock protects against races with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		 * rpc_make_runnable().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		 * rpc_task, rpc_make_runnable() can assign it to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		 * different workqueue. We therefore cannot assume that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		 * rpc_task pointer may still be dereferenced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		queue = task->tk_waitqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		spin_lock(&queue->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		if (!RPC_IS_QUEUED(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			spin_unlock(&queue->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		rpc_clear_running(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		spin_unlock(&queue->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		if (task_is_async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		/* sync task: sleep here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		trace_rpc_task_sync_sleep(task, task->tk_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		status = out_of_line_wait_on_bit(&task->tk_runstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 				RPC_TASK_QUEUED, rpc_wait_bit_killable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 				TASK_KILLABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			 * When a sync task receives a signal, it exits with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			 * -ERESTARTSYS. In order to catch any callbacks that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			 * clean up after sleeping on some queue, we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			 * break the loop here, but go around once more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 			trace_rpc_task_signalled(task, task->tk_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			task->tk_rpc_status = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			rpc_exit(task, -ERESTARTSYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		trace_rpc_task_sync_wake(task, task->tk_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	/* Release all resources associated with the task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	rpc_release_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  * User-visible entry point to the scheduler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  * This may be called recursively if e.g. an async NFS task updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  * the attributes and finds that dirty pages must be flushed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  * NOTE: Upon exit of this function the task is guaranteed to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967)  *	 released. In particular note that tk_release() will have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968)  *	 been called, so your task memory may have been freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) void rpc_execute(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	bool is_async = RPC_IS_ASYNC(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	rpc_set_active(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	rpc_make_runnable(rpciod_workqueue, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	if (!is_async) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		unsigned int pflags = memalloc_nofs_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		__rpc_execute(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		memalloc_nofs_restore(pflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) static void rpc_async_schedule(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	unsigned int pflags = memalloc_nofs_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	memalloc_nofs_restore(pflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992)  * rpc_malloc - allocate RPC buffer resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  * @task: RPC task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * A single memory region is allocated, which is split between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * RPC call and RPC reply that this task is being used for. When
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  * this RPC is retired, the memory is released by calling rpc_free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  * To prevent rpciod from hanging, this allocator never sleeps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  * returning -ENOMEM and suppressing warning if the request cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  * be serviced immediately. The caller can arrange to sleep in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  * way that is safe for rpciod.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)  * Most requests are 'small' (under 2KiB) and can be serviced from a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * mempool, ensuring that NFS reads and writes can always proceed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  * and that there is good locality of reference for these buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) int rpc_malloc(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	struct rpc_rqst *rqst = task->tk_rqstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	struct rpc_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	gfp_t gfp = GFP_NOFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	if (RPC_IS_SWAPPER(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	size += sizeof(struct rpc_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	if (size <= RPC_BUFFER_MAXSIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		buf = mempool_alloc(rpc_buffer_mempool, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		buf = kmalloc(size, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	buf->len = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	rqst->rq_buffer = buf->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) EXPORT_SYMBOL_GPL(rpc_malloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  * rpc_free - free RPC buffer resources allocated via rpc_malloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  * @task: RPC task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) void rpc_free(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	void *buffer = task->tk_rqstp->rq_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	struct rpc_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	buf = container_of(buffer, struct rpc_buffer, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	size = buf->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	if (size <= RPC_BUFFER_MAXSIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		mempool_free(buf, rpc_buffer_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) EXPORT_SYMBOL_GPL(rpc_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  * Creation and deletion of RPC task structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	memset(task, 0, sizeof(*task));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	atomic_set(&task->tk_count, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	task->tk_flags  = task_setup_data->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	task->tk_ops = task_setup_data->callback_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	task->tk_calldata = task_setup_data->callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	INIT_LIST_HEAD(&task->tk_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	task->tk_owner = current->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	/* Initialize workqueue for async tasks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	task->tk_workqueue = task_setup_data->workqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			xprt_get(task_setup_data->rpc_xprt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	if (task->tk_ops->rpc_call_prepare != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		task->tk_action = rpc_prepare_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	rpc_init_task_statistics(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static struct rpc_task *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) rpc_alloc_task(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)  * Create a new task for the specified client.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	struct rpc_task	*task = setup_data->task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	unsigned short flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (task == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		task = rpc_alloc_task();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		flags = RPC_TASK_DYNAMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	rpc_init_task(task, setup_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	task->tk_flags |= flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	return task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  * rpc_free_task - release rpc task and perform cleanups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  * Note that we free up the rpc_task _after_ rpc_release_calldata()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  * in order to work around a workqueue dependency issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  * Tejun Heo states:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  * "Workqueue currently considers two work items to be the same if they're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)  * on the same address and won't execute them concurrently - ie. it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * makes a work item which is queued again while being executed wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  * for the previous execution to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  * If a work function frees the work item, and then waits for an event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  * which should be performed by another work item and *that* work item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * recycles the freed work item, it can create a false dependency loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  * There really is no reliable way to detect this short of verifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  * every memory free."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static void rpc_free_task(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	unsigned short tk_flags = task->tk_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	put_rpccred(task->tk_op_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	rpc_release_calldata(task->tk_ops, task->tk_calldata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	if (tk_flags & RPC_TASK_DYNAMIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		mempool_free(task, rpc_task_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static void rpc_async_release(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	unsigned int pflags = memalloc_nofs_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	memalloc_nofs_restore(pflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static void rpc_release_resources_task(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	xprt_release(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	if (task->tk_msg.rpc_cred) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			put_cred(task->tk_msg.rpc_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		task->tk_msg.rpc_cred = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	rpc_task_release_client(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static void rpc_final_put_task(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		struct workqueue_struct *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	if (q != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		INIT_WORK(&task->u.tk_work, rpc_async_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		queue_work(q, &task->u.tk_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		rpc_free_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if (atomic_dec_and_test(&task->tk_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		rpc_release_resources_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		rpc_final_put_task(task, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) void rpc_put_task(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	rpc_do_put_task(task, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) EXPORT_SYMBOL_GPL(rpc_put_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) void rpc_put_task_async(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	rpc_do_put_task(task, task->tk_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) EXPORT_SYMBOL_GPL(rpc_put_task_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static void rpc_release_task(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	WARN_ON_ONCE(RPC_IS_QUEUED(task));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	rpc_release_resources_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	 * so it should be safe to use task->tk_count as a test for whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	 * or not any other processes still hold references to our rpc_task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		/* Wake up anyone who may be waiting for task completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		if (!rpc_complete_task(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		if (!atomic_dec_and_test(&task->tk_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	rpc_final_put_task(task, task->tk_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) int rpciod_up(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) void rpciod_down(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	module_put(THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)  * Start up the rpciod workqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int rpciod_start(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	 * Create the rpciod thread and wait for it to start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (!wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		goto out_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	rpciod_workqueue = wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	/* Note: highpri because network receive is latency sensitive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	if (!wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		goto free_rpciod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	xprtiod_workqueue = wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) free_rpciod:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	wq = rpciod_workqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	rpciod_workqueue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	destroy_workqueue(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) out_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static void rpciod_stop(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	struct workqueue_struct *wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	if (rpciod_workqueue == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	wq = rpciod_workqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	rpciod_workqueue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	destroy_workqueue(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	wq = xprtiod_workqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	xprtiod_workqueue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	destroy_workqueue(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) rpc_destroy_mempool(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	rpciod_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	mempool_destroy(rpc_buffer_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	mempool_destroy(rpc_task_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	kmem_cache_destroy(rpc_task_slabp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	kmem_cache_destroy(rpc_buffer_slabp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	rpc_destroy_wait_queue(&delay_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) rpc_init_mempool(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	 * The following is not strictly a mempool initialisation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	 * but there is no harm in doing it here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	rpc_init_wait_queue(&delay_queue, "delayq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	if (!rpciod_start())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		goto err_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	rpc_task_slabp = kmem_cache_create("rpc_tasks",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 					     sizeof(struct rpc_task),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 					     0, SLAB_HWCACHE_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 					     NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	if (!rpc_task_slabp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		goto err_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 					     RPC_BUFFER_MAXSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 					     0, SLAB_HWCACHE_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 					     NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	if (!rpc_buffer_slabp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		goto err_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 						    rpc_task_slabp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (!rpc_task_mempool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		goto err_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 						      rpc_buffer_slabp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	if (!rpc_buffer_mempool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		goto err_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) err_nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	rpc_destroy_mempool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }