Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */
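/*
 * Illustrative sketch, not part of workqueue.c itself: a minimal example of
 * the client-side API described above.  DECLARE_WORK(), schedule_work(),
 * queue_work(), flush_work(), alloc_workqueue() and destroy_workqueue() are
 * the standard interface from include/linux/workqueue.h; the my_* names are
 * hypothetical.  Kept under #if 0 so it does not affect the build.
 */
#if 0
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	pr_info("deferred work executed\n");	/* runs in process context */
}

static DECLARE_WORK(my_work, my_work_fn);

static int my_example(void)
{
	struct workqueue_struct *my_wq;

	/* hand the item to the shared per-CPU worker pool */
	schedule_work(&my_work);
	flush_work(&my_work);

	/* or create a dedicated unbound workqueue backed by the extra pools */
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	destroy_workqueue(my_wq);	/* drains pending work, then frees the wq */
	return 0;
}
#endif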

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <uapi/linux/sched/types.h>

#include "workqueue_internal.h"

#include <trace/hooks/wqlockup.h>
/* events/workqueue.h uses default TRACE_INCLUDE_PATH */
#undef TRACE_INCLUDE_PATH

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after a failure */

	/*
	 * Rescue workers are used only in emergencies and are shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 24,
};
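/*
 * Note (added for clarity, not in the upstream file): the timeouts above are
 * in jiffies, so their wall-clock value depends on CONFIG_HZ.  For example,
 * with HZ=1000, MAYDAY_INITIAL_TIMEOUT is HZ/100 = 10 ticks = 10ms; with
 * HZ=100, HZ/100 = 1 tick falls below the two-tick minimum and is clamped to
 * 2 ticks = 20ms.  MAYDAY_INTERVAL is always HZ/10 = 100ms and
 * IDLE_WORKER_TIMEOUT is 300 * HZ = 5 minutes regardless of HZ.
 */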

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * X: During normal operation, modification requires pool->lock and should
 *    be done only from local cpu.  Either disabling preemption on local
 *    cpu or grabbing pool->lock is enough for read access.  If
 *    POOL_DISASSOCIATED is set, it's identical to L.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 */
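/*
 * Illustrative sketch, not part of workqueue.c itself: what a "PR"
 * annotation means in practice.  worker_pool_idr (defined below) is written
 * under wq_pool_mutex and may be read under either that mutex or an RCU
 * read-side critical section.  lookup_pool() is a hypothetical helper.
 */
#if 0
static struct worker_pool *lookup_pool(int pool_id)
{
	struct worker_pool *pool;

	rcu_read_lock();		/* or mutex_lock(&wq_pool_mutex) */
	pool = idr_find(&worker_pool_idr, pool_id);
	rcu_read_unlock();

	/*
	 * @pool may be freed once the RCU read section ends; a caller that
	 * needs it for longer must hold wq_pool_mutex or otherwise pin it.
	 */
	return pool;
}
#endif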

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* X: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or is the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * The current concurrency level.  As it's likely to be accessed
	 * from other CPUs during try_to_wake_up(), put it in a separate
	 * cacheline.
	 */
	atomic_t		nr_running ____cacheline_aligned_in_smp;

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
} ____cacheline_aligned_in_smp;

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	/*
	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
	 * itself is also RCU protected so that the first pwq can be
	 * determined without grabbing wq->mutex.
	 */
	struct work_struct	unbound_release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);
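/*
 * Illustrative sketch, not part of workqueue.c itself: because a pwq is
 * aligned to 1 << WORK_STRUCT_FLAG_BITS, its address has that many zero low
 * bits, which is what lets work->data carry both the pointer and the flag
 * bits in a single word.  The pack_pwq()/unpack_pwq() helpers below are
 * hypothetical; the real code does this in set_work_pwq() and
 * get_work_pwq() further down.
 */
#if 0
static unsigned long pack_pwq(struct pool_workqueue *pwq, unsigned long flags)
{
	/* the low bits of the aligned pointer are guaranteed to be zero */
	return (unsigned long)pwq | flags;
}

static struct pool_workqueue *unpack_pwq(unsigned long data)
{
	/* WORK_STRUCT_WQ_DATA_MASK clears the flag bits, leaving the pointer */
	return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
}
#endif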

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			saved_max_active; /* WQ: saved pwq max_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
};

static struct kmem_cache *pwq_cache;

static cpumask_var_t *wq_numa_possible_cpumask;
					/* possible CPUs of each node */

static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet? */

static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */

/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
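/*
 * Note (added for clarity, not in the upstream file): workqueue code is
 * always built in, so the module parameters above are set on the kernel
 * command line with the "workqueue." prefix, e.g. workqueue.disable_numa=1,
 * workqueue.power_efficient=1 or workqueue.debug_force_rr_cpu=1.  The last
 * one is declared with mode 0644 and can also be toggled at runtime via
 * /sys/module/workqueue/parameters/debug_force_rr_cpu; the 0444 ones are
 * read-only after boot.
 */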

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
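/*
 * Illustrative sketch, not part of workqueue.c itself: the system
 * workqueues above are ready-made instances for common cases, so callers
 * pick one instead of allocating their own.  The my_* work items are
 * hypothetical.
 */
#if 0
static void my_queueing_examples(struct work_struct *my_short_work,
				 struct work_struct *my_slow_work)
{
	/* short, CPU-local work: the default shared workqueue */
	queue_work(system_wq, my_short_work);	/* same as schedule_work() */

	/* work that may sleep for a long time and shouldn't delay others */
	queue_work(system_long_wq, my_slow_work);

	/* work that does not need to run on the submitting CPU */
	queue_work(system_unbound_wq, my_slow_work);
}
#endif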

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(workqueue_execute_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(workqueue_execute_end);

#define assert_rcu_or_pool_mutex()					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq->mutex) &&		\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex held.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
				 lockdep_is_held(&(wq->mutex)))

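/*
 * Illustrative sketch, not part of workqueue.c itself: typical use of the
 * iterator above, holding wq->mutex (an RCU read section would also do)
 * while walking the pwqs.  dump_one_pwq() is a hypothetical helper.
 */
#if 0
static void dump_wq_pwqs(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;

	mutex_lock(&wq->mutex);
	for_each_pwq(pwq, wq)
		dump_one_pwq(pwq);	/* pwq can't go away while wq->mutex is held */
	mutex_unlock(&wq->mutex);
}
#endif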
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static const struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
	struct work_struct *work = addr;

	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.is_static_object = work_is_static_object,
	.fixup_init	= work_fixup_init,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
	destroy_timer_on_stack(&work->timer);
	debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
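/*
 * Illustrative sketch, not part of workqueue.c itself: the on-stack helpers
 * above pair with INIT_WORK_ONSTACK() from linux/workqueue.h.  my_stack_fn
 * is hypothetical.
 */
#if 0
static void my_wait_for_deferred(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, my_stack_fn);
	schedule_work(&work);
	flush_work(&work);		/* must complete before the stack frame goes away */
	destroy_work_on_stack(&work);	/* tells debugobjects the object is gone */
}
#endif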

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

/**
 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 * @wq: the target workqueue
 * @node: the node ID
 *
 * This must be called with any of wq_pool_mutex, wq->mutex or RCU
 * read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * Return: The unbound pool_workqueue for @node.
 */
static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
						  int node)
{
	assert_rcu_or_wq_mutex_or_pool_mutex(wq);

	/*
	 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
	 * delayed item is pending.  The plan is to keep CPU -> NODE
	 * mapping valid and stable across CPU on/offlines.  Once that
	 * happens, this workaround can be removed.
	 */
	if (unlikely(node == NUMA_NO_NODE))
		return wq->dfl_pwq;

	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq.  Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
	/*
	 * The following mb guarantees that previous clear of a PENDING bit
	 * will not be reordered with any speculative LOADS or STORES from
	 * work->current_func, which is executed afterwards.  This possible
	 * reordering can lead to a missed execution on attempt to queue
	 * the same @work.  E.g. consider this case:
	 *
	 *   CPU#0                         CPU#1
	 *   ----------------------------  --------------------------------
	 *
	 * 1  STORE event_indicated
	 * 2  queue_work_on() {
	 * 3    test_and_set_bit(PENDING)
	 * 4 }                             set_..._and_clear_pending() {
	 * 5                                 set_work_data() # clear bit
	 * 6                                 smp_mb()
	 * 7                               work->current_func() {
	 * 8				      LOAD event_indicated
	 *				   }
	 *
	 * Without an explicit full barrier speculative LOAD on line 8 can
	 * be executed before CPU#0 does STORE on line 1.  If that happens,
	 * CPU#0 observes the PENDING bit is still set and new execution of
	 * a @work is not queued in the hope that CPU#1 will eventually
	 * finish the queued @work.  Meanwhile CPU#1 does not see
	 * event_indicated is set, because speculative LOAD was executed
	 * before actual STORE.
	 */
	smp_mb();
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705)  * access under RCU read lock.  As such, this function should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706)  * called under wq_pool_mutex or inside of a rcu_read_lock() region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708)  * All fields of the returned pool are accessible as long as the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709)  * mentioned locking is in effect.  If the returned pool needs to be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710)  * beyond the critical section, the caller is responsible for ensuring the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711)  * returned pool is and stays online.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713)  * Return: The worker_pool @work was last associated with.  %NULL if none.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) static struct worker_pool *get_work_pool(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	unsigned long data = atomic_long_read(&work->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	int pool_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	assert_rcu_or_pool_mutex();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	if (data & WORK_STRUCT_PWQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		return ((struct pool_workqueue *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	if (pool_id == WORK_OFFQ_POOL_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	return idr_find(&worker_pool_idr, pool_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) }
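
/*
 * Editor's sketch (not part of the original source): the calling
 * convention documented above, in practice.  try_to_grab_pending()
 * later in this file follows the same pattern.
 *
 *	rcu_read_lock();
 *	pool = get_work_pool(work);
 *	if (pool) {
 *		raw_spin_lock(&pool->lock);
 *		... use pool ...
 *		raw_spin_unlock(&pool->lock);
 *	}
 *	rcu_read_unlock();
 */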
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734)  * get_work_pool_id - return the worker pool ID a given work is associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735)  * @work: the work item of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737)  * Return: The worker_pool ID @work was last associated with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738)  * %WORK_OFFQ_POOL_NONE if none.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) static int get_work_pool_id(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	unsigned long data = atomic_long_read(&work->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	if (data & WORK_STRUCT_PWQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		return ((struct pool_workqueue *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	return data >> WORK_OFFQ_POOL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) static void mark_work_canceling(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	unsigned long pool_id = get_work_pool_id(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	pool_id <<= WORK_OFFQ_POOL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) static bool work_is_canceling(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	unsigned long data = atomic_long_read(&work->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) }
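
/*
 * Editor's illustration (a sketch, not part of the original source): the
 * off-queue encoding that mark_work_canceling() and work_is_canceling()
 * above rely on, using the WORK_OFFQ_* and WORK_STRUCT_* constants from
 * linux/workqueue.h:
 *
 *	work->data == (pool_id << WORK_OFFQ_POOL_SHIFT) |
 *		      WORK_OFFQ_CANCELING | WORK_STRUCT_PENDING
 *
 * i.e. while @work is off queue, the high bits of work->data carry the
 * last pool ID and the low bits carry the WORK_STRUCT_ and WORK_OFFQ_
 * flag bits.
 */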
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767)  * Policy functions.  These define the policies on how the global worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768)  * pools are managed.  Unless noted otherwise, these functions assume that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769)  * they're being called with pool->lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) static bool __need_more_worker(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	return !atomic_read(&pool->nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  * Need to wake up a worker?  Called from anything but currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  * running workers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781)  * Note that, because unbound workers never contribute to nr_running, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782)  * function will always return %true for unbound pools as long as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  * worklist isn't empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) static bool need_more_worker(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	return !list_empty(&pool->worklist) && __need_more_worker(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) /* Can I start working?  Called from busy but !running workers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) static bool may_start_working(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	return pool->nr_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) /* Do I need to keep working?  Called from currently running workers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) static bool keep_working(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	return !list_empty(&pool->worklist) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		atomic_read(&pool->nr_running) <= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) /* Do we need a new worker?  Called from manager. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) static bool need_to_create_worker(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	return need_more_worker(pool) && !may_start_working(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) /* Do we have too many workers and should some go away? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) static bool too_many_workers(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	int nr_busy = pool->nr_workers - nr_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) }
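
/*
 * Editor's worked example (not part of the original source), assuming the
 * MAX_IDLE_WORKERS_RATIO of 4 defined earlier in this file: with
 * nr_busy == 8, three idle workers are tolerated ((3 - 2) * 4 == 4 < 8),
 * while a fourth counts as too many ((4 - 2) * 4 == 8 >= 8) and signals
 * that surplus idle workers may be retired.
 */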
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820)  * Wake up functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) /* Return the first idle worker.  Safe with preemption disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) static struct worker *first_idle_worker(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	if (unlikely(list_empty(&pool->idle_list)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	return list_first_entry(&pool->idle_list, struct worker, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  * wake_up_worker - wake up an idle worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  * @pool: worker pool to wake worker from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * Wake up the first idle worker of @pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  * raw_spin_lock_irq(pool->lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) static void wake_up_worker(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	struct worker *worker = first_idle_worker(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (likely(worker))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		wake_up_process(worker->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  * wq_worker_running - a worker is running again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851)  * @task: task waking up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853)  * This function is called when a worker returns from schedule().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) void wq_worker_running(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	struct worker *worker = kthread_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (!worker->sleeping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 * and the nr_running increment below, we may ruin the nr_running reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	 * pool. Protect against such a race.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (!(worker->flags & WORKER_NOT_RUNNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		atomic_inc(&worker->pool->nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	worker->sleeping = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876)  * wq_worker_sleeping - a worker is going to sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877)  * @task: task going to sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879)  * This function is called from schedule() when a busy worker is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880)  * going to sleep.  Preemption needs to be disabled to protect the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881)  * ->sleeping assignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) void wq_worker_sleeping(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	struct worker *next, *worker = kthread_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	 * Rescuers, which may not have all the fields set up like normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	 * workers, also reach here; don't access anything before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	 * checking NOT_RUNNING.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	if (worker->flags & WORKER_NOT_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	/* Return if preempted before wq_worker_running() was reached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (worker->sleeping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	worker->sleeping = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	 * The counterpart of the following dec_and_test + implied mb +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	 * worklist-not-empty test sequence is in insert_work().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	 * Please read the comment there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	 * NOT_RUNNING is clear.  This means that we're bound to and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	 * running on the local cpu w/ rq lock held and preemption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	 * disabled, which in turn means that nobody else could be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	 * manipulating idle_list, so dereferencing idle_list without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	 * pool lock is safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	if (atomic_dec_and_test(&pool->nr_running) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	    !list_empty(&pool->worklist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		next = first_idle_worker(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		if (next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			wake_up_process(next->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  * wq_worker_last_func - retrieve worker's last work function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  * @task: Task to retrieve last work function of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  * Determine the last function a worker executed. This is called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * the scheduler to get a worker's last known identity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * raw_spin_lock_irq(rq->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * This function is called during schedule() when a kworker is going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * to sleep. It's used by psi to identify aggregation workers during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * dequeuing, to allow periodic aggregation to shut off when that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * worker is the last task in the system or cgroup to go to sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  * As this function doesn't involve any workqueue-related locking, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  * only returns stable values when called from inside the scheduler's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  * queuing and dequeuing paths, when @task, which must be a kworker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  * is guaranteed to not be processing any works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  * The last work function %current executed as a worker, %NULL if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947)  * hasn't executed any work yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) work_func_t wq_worker_last_func(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	struct worker *worker = kthread_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	return worker->last_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  * worker_set_flags - set worker flags and adjust nr_running accordingly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  * @worker: self
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959)  * @flags: flags to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  * Set @flags in @worker->flags and adjust nr_running accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  * raw_spin_lock_irq(pool->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) static inline void worker_set_flags(struct worker *worker, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	struct worker_pool *pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	WARN_ON_ONCE(worker->task != current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	/* If transitioning into NOT_RUNNING, adjust nr_running. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	if ((flags & WORKER_NOT_RUNNING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	    !(worker->flags & WORKER_NOT_RUNNING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		atomic_dec(&pool->nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	worker->flags |= flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  * @worker: self
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  * @flags: flags to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  * Clear @flags in @worker->flags and adjust nr_running accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989)  * raw_spin_lock_irq(pool->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	struct worker_pool *pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	unsigned int oflags = worker->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	WARN_ON_ONCE(worker->task != current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	worker->flags &= ~flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 * mask of multiple flags, not a single flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		if (!(worker->flags & WORKER_NOT_RUNNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			atomic_inc(&pool->nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
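
/*
 * Editor's illustration (not part of the original source), assuming both
 * WORKER_PREP and WORKER_CPU_INTENSIVE are part of the WORKER_NOT_RUNNING
 * mask defined earlier in this file:
 *
 *	(worker->flags is WORKER_PREP | WORKER_CPU_INTENSIVE)
 *	worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 *		-> WORKER_PREP is still set, nr_running is not incremented
 *	worker_clr_flags(worker, WORKER_PREP);
 *		-> no NOT_RUNNING bit remains, nr_running is incremented
 */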
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  * find_worker_executing_work - find worker which is executing a work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  * @pool: pool of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  * @work: work to find worker for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  * Find a worker which is executing @work on @pool by searching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  * @pool->busy_hash which is keyed by the address of @work.  For a worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  * to match, its current execution should match the address of @work and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  * its work function.  This is to avoid unwanted dependency between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  * unrelated work executions through a work item being recycled while still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  * being executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  * This is a bit tricky.  A work item may be freed once its execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  * starts and nothing prevents the freed area from being recycled for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  * another work item.  If the same work item address ends up being reused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  * before the original execution finishes, workqueue will identify the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  * recycled work item as currently executing and make it wait until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  * current execution finishes, introducing an unwanted dependency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  * This function checks the work item address and work function to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * false positives.  Note that this isn't complete as one may construct a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  * work function which can introduce dependency onto itself through a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  * recycled work item.  Well, if somebody wants to shoot themselves in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  * foot that badly, there's only so much we can do, and if such a deadlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  * actually occurs, it should be easy to locate the culprit work function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  * raw_spin_lock_irq(pool->lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  * Pointer to worker which is executing @work if found, %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)  * otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static struct worker *find_worker_executing_work(struct worker_pool *pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 						 struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	hash_for_each_possible(pool->busy_hash, worker, hentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			       (unsigned long)work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		if (worker->current_work == work &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		    worker->current_func == work->func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			return worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
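
/*
 * Editor's sketch (not part of the original source): the typical lookup
 * pattern, as used by __queue_work() later in this file to keep a work
 * item non-reentrant across pools:
 *
 *	raw_spin_lock(&pool->lock);
 *	worker = find_worker_executing_work(pool, work);
 *	if (worker && worker->current_pwq->wq == wq)
 *		pwq = worker->current_pwq;	(reuse the executing pool)
 */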
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  * move_linked_works - move linked works to a list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  * @work: start of series of works to be scheduled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  * @head: target list to append @work to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  * @nextp: out parameter for nested worklist walking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)  * Schedule linked works starting from @work onto @head.  The work series
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)  * to be scheduled starts at @work and includes any consecutive work with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)  * WORK_STRUCT_LINKED set in its predecessor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)  * If @nextp is not NULL, it's updated to point to the next work of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)  * the last scheduled work.  This allows move_linked_works() to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)  * nested inside outer list_for_each_entry_safe().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)  * raw_spin_lock_irq(pool->lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static void move_linked_works(struct work_struct *work, struct list_head *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			      struct work_struct **nextp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	struct work_struct *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	 * A linked worklist always ends before the end of the list, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	 * NULL can be used as the list head here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	list_for_each_entry_safe_from(work, n, NULL, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		list_move_tail(&work->entry, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	 * If we're already inside a safe list traversal and have moved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	 * multiple works to the scheduled queue, the next position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	 * needs to be updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	if (nextp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		*nextp = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  * get_pwq - get an extra reference on the specified pool_workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  * @pwq: pool_workqueue to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  * Obtain an extra reference on @pwq.  The caller should guarantee that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  * @pwq has positive refcnt and be holding the matching pool->lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static void get_pwq(struct pool_workqueue *pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	lockdep_assert_held(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	WARN_ON_ONCE(pwq->refcnt <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	pwq->refcnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  * put_pwq - put a pool_workqueue reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  * @pwq: pool_workqueue to put
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * destruction.  The caller should be holding the matching pool->lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static void put_pwq(struct pool_workqueue *pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	lockdep_assert_held(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	if (likely(--pwq->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	 * @pwq can't be released under pool->lock, bounce to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	 * pwq_unbound_release_workfn().  This never recurses on the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	 * pool->lock as this path is taken only for unbound workqueues and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	 * the release work item is scheduled on a per-cpu workqueue.  To
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	 * avoid lockdep warning, unbound pool->locks are given lockdep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	 * subclass of 1 in get_unbound_pool().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	schedule_work(&pwq->unbound_release_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)  * @pwq: pool_workqueue to put (can be %NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)  * put_pwq() with locking.  This function also allows %NULL @pwq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static void put_pwq_unlocked(struct pool_workqueue *pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	if (pwq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		 * As both pwqs and pools are RCU protected, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		 * following lock operations are safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		raw_spin_lock_irq(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		put_pwq(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		raw_spin_unlock_irq(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static void pwq_activate_delayed_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	struct pool_workqueue *pwq = get_work_pwq(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	trace_workqueue_activate_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	if (list_empty(&pwq->pool->worklist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		pwq->pool->watchdog_ts = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	move_linked_works(work, &pwq->pool->worklist, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	pwq->nr_active++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	struct work_struct *work = list_first_entry(&pwq->delayed_works,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 						    struct work_struct, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	pwq_activate_delayed_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)  * @pwq: pwq of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)  * @color: color of work which left the queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)  * A work item has either completed or been removed from the pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)  * queue; decrement nr_in_flight of its pwq and handle workqueue flushing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  * raw_spin_lock_irq(pool->lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	/* uncolored work items don't participate in flushing or nr_active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	if (color == WORK_NO_COLOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	pwq->nr_in_flight[color]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	pwq->nr_active--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	if (!list_empty(&pwq->delayed_works)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		/* one down, submit a delayed one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		if (pwq->nr_active < pwq->max_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			pwq_activate_first_delayed(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	/* is flush in progress and are we at the flushing tip? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (likely(pwq->flush_color != color))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	/* are there still in-flight works? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	if (pwq->nr_in_flight[color])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	/* this pwq is done, clear flush_color */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	pwq->flush_color = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	 * If this was the last pwq, wake up the first flusher.  It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	 * will handle the rest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		complete(&pwq->wq->first_flusher->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	put_pwq(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
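
/*
 * Editor's worked example (not part of the original source): with
 * pwq->max_active == 1 and two work items queued on the same pwq, the
 * second item sits on pwq->delayed_works with WORK_STRUCT_DELAYED set.
 * When the first item completes, pwq_dec_nr_in_flight() drops nr_active
 * to 0, notices the non-empty delayed list and calls
 * pwq_activate_first_delayed(), which moves the second item onto the
 * pool's worklist and bumps nr_active back to 1.
 */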
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)  * try_to_grab_pending - steal work item from worklist and disable irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)  * @work: work item to steal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)  * @is_dwork: @work is a delayed_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)  * @flags: place to store irq state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)  * Try to grab PENDING bit of @work.  This function can handle @work in any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)  * stable state - idle, on timer or on worklist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)  *  ========	================================================================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)  *  1		if @work was pending and we successfully stole PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)  *  0		if @work was idle and we claimed PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)  *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)  *  -ENOENT	if someone else is canceling @work, this state may persist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)  *		for arbitrarily long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)  *  ========	================================================================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)  * Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)  * interrupted while holding PENDING and @work off queue, irq must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  * disabled on entry.  This, combined with delayed_work->timer being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)  * irqsafe, ensures that we return -EAGAIN only for a finite, short period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  * On a successful (>= 0) return, irq is disabled and the caller is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  * responsible for releasing it using local_irq_restore(*@flags).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  * This function is safe to call from any context including IRQ handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			       unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	local_irq_save(*flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	/* try to steal the timer if it exists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	if (is_dwork) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		struct delayed_work *dwork = to_delayed_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		 * dwork->timer is irqsafe.  If del_timer() fails, it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		 * guaranteed that the timer is not queued anywhere and not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		 * running on the local CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		if (likely(del_timer(&dwork->timer)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	/* try to claim PENDING the normal way */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	 * The queueing is in progress, or it is already queued. Try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	pool = get_work_pool(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	if (!pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	raw_spin_lock(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	 * work->data is guaranteed to point to pwq only while the work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	 * item is queued on pwq->wq, and both updating work->data to point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	 * to pwq on queueing and to pool on dequeueing are done under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	 * pwq->pool->lock.  This in turn guarantees that, if work->data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	 * points to pwq which is associated with a locked pool, the work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	 * item is currently queued on that pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	pwq = get_work_pwq(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	if (pwq && pwq->pool == pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		debug_work_deactivate(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		 * A delayed work item cannot be grabbed directly because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		 * it might have linked NO_COLOR work items which, if left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		 * on the delayed_list, will confuse pwq->nr_active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		 * management later on and cause a stall.  Make sure the work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		 * item is activated before grabbing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			pwq_activate_delayed_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		list_del_init(&work->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		pwq_dec_nr_in_flight(pwq, get_work_color(work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		/* work->data points to pwq iff queued, point to pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		set_work_pool_and_keep_pending(work, pool->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		raw_spin_unlock(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	raw_spin_unlock(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	local_irq_restore(*flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	if (work_is_canceling(work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
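
/*
 * Editor's sketch (not part of the original source): the caller pattern
 * implied by the -EAGAIN contract above, mirroring the cancel paths later
 * in this file:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (ret >= 0) {
 *		(PENDING is owned and @work is off all queues here)
 *		...
 *		local_irq_restore(flags);
 *	}
 */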
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)  * insert_work - insert a work into a pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)  * @pwq: pwq @work belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  * @work: work to insert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  * @head: insertion point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  * @extra_flags: extra WORK_STRUCT_* flags to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * Insert @work, which belongs to @pwq, after @head.  @extra_flags is OR'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  * into the work_struct flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  * raw_spin_lock_irq(pool->lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			struct list_head *head, unsigned int extra_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	struct worker_pool *pool = pwq->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	/* record the work call stack in order to print it in KASAN reports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	kasan_record_aux_stack(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	/* we own @work, set data and link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	set_work_pwq(work, pwq, extra_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	list_add_tail(&work->entry, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	get_pwq(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	 * Ensure either wq_worker_sleeping() sees the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	 * list_add_tail() or we see zero nr_running to avoid workers lying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	 * around lazily while there is work to be processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	if (__need_more_worker(pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		wake_up_worker(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
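
/*
 * Editor's illustration (not part of the original source): the barrier
 * pairing referred to in the comment above.
 *
 *	queueing side (insert_work):
 *		list_add_tail(&work->entry, head); smp_mb();
 *		if (!nr_running) wake_up_worker(pool);
 *	sleeping side (wq_worker_sleeping):
 *		atomic_dec_and_test(&pool->nr_running);  (implied full barrier)
 *		if (!list_empty(&pool->worklist)) wake the first idle worker;
 *
 * Either the queueing side sees nr_running == 0 and wakes an idle worker,
 * or the sleeping side sees the newly added work item and wakes one
 * itself; a queued item is never left behind with every worker asleep.
 */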
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)  * Test whether @work is being queued from another work executing on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)  * same workqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static bool is_chained_work(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	worker = current_wq_worker();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	 * Return %true iff I'm a worker executing a work item on @wq.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	 * I'm @worker, it's safe to dereference it without locking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	return worker && worker->current_pwq->wq == wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  * When queueing an unbound work item to a wq, prefer the local CPU if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)  * allowed by wq_unbound_cpumask.  Otherwise, round-robin among the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)  * allowed CPUs to avoid perturbing sensitive tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static int wq_select_unbound_cpu(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	static bool printed_dbg_warning;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	int new_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	if (likely(!wq_debug_force_rr_cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	} else if (!printed_dbg_warning) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		printed_dbg_warning = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	if (cpumask_empty(wq_unbound_cpumask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	new_cpu = __this_cpu_read(wq_rr_cpu_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	if (unlikely(new_cpu >= nr_cpu_ids)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		if (unlikely(new_cpu >= nr_cpu_ids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 			return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	__this_cpu_write(wq_rr_cpu_last, new_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	return new_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
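
/*
 * Editor's sketch (not part of the original source): with
 * wq_unbound_cpumask spanning CPUs 0-3, a caller running on CPU 2 simply
 * gets CPU 2 back.  A caller on a CPU outside the mask (or any caller when
 * wq_debug_force_rr_cpu is set) instead advances the per-cpu
 * wq_rr_cpu_last cursor round-robin through CPUs 0-3, wrapping via
 * cpumask_first_and() when it runs off the end of the mask.
 */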
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) static void __queue_work(int cpu, struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 			 struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	struct worker_pool *last_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	struct list_head *worklist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	unsigned int work_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	unsigned int req_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	 * While a work item is PENDING && off queue, a task trying to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	 * steal the PENDING will busy-loop waiting for it to either get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	 * queued or lose PENDING.  Grabbing PENDING and queueing should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	 * happen with IRQ disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	/* if draining, only work from the same workqueue is allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	if (unlikely(wq->flags & __WQ_DRAINING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	    WARN_ON_ONCE(!is_chained_work(wq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	/* pwq which will be used unless @work is executing elsewhere */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	if (wq->flags & WQ_UNBOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		if (req_cpu == WORK_CPU_UNBOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		if (req_cpu == WORK_CPU_UNBOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 			cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	 * If @work was previously on a different pool, it might still be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	 * running there, in which case the work needs to be queued on that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	 * pool to guarantee non-reentrancy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	last_pool = get_work_pool(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	if (last_pool && last_pool != pwq->pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		raw_spin_lock(&last_pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		worker = find_worker_executing_work(last_pool, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		if (worker && worker->current_pwq->wq == wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			pwq = worker->current_pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 			/* meh... not running there, queue here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			raw_spin_unlock(&last_pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			raw_spin_lock(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		raw_spin_lock(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	 * pwq is determined and locked.  For unbound pools, we could have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	 * raced with pwq release and it could already be dead.  If its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	 * refcnt is zero, repeat pwq selection.  Note that pwqs never die
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	 * without another pwq replacing it in the numa_pwq_tbl or while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	 * work items are executing on it, so the retrying is guaranteed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	 * make forward-progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	if (unlikely(!pwq->refcnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		if (wq->flags & WQ_UNBOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			raw_spin_unlock(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 			goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		/* oops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			  wq->name, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	/* pwq determined, queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	trace_workqueue_queue_work(req_cpu, pwq, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	if (WARN_ON(!list_empty(&work->entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	pwq->nr_in_flight[pwq->work_color]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	work_flags = work_color_to_flags(pwq->work_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	if (likely(pwq->nr_active < pwq->max_active)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		trace_workqueue_activate_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		pwq->nr_active++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		worklist = &pwq->pool->worklist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		if (list_empty(worklist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			pwq->pool->watchdog_ts = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		work_flags |= WORK_STRUCT_DELAYED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		worklist = &pwq->delayed_works;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	debug_work_activate(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	insert_work(pwq, work, worklist, work_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	raw_spin_unlock(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)  * queue_work_on - queue work on specific cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)  * @cpu: CPU number to execute work on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)  * @wq: workqueue to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)  * @work: work to queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  * We queue the work to a specific CPU; the caller must ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  * CPU can't go away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)  * Return: %false if @work was already on a queue, %true otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) bool queue_work_on(int cpu, struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		   struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		__queue_work(cpu, wq, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) EXPORT_SYMBOL(queue_work_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
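/*
 * Illustrative sketch of a queue_work_on() caller; the my_* names below
 * are hypothetical and only the workqueue API calls are real.  The
 * caller is responsible for keeping @cpu online, e.g. by running under
 * cpus_read_lock() or from a CPU hotplug callback.
 */
#include <linux/workqueue.h>

static void my_percpu_fn(struct work_struct *work)
{
	/* runs in process context on the CPU the item was queued on */
}
static DECLARE_WORK(my_percpu_work, my_percpu_fn);

static void my_kick_cpu(int cpu)
{
	/* returns %false if my_percpu_work was already pending */
	queue_work_on(cpu, system_wq, &my_percpu_work);
}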
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)  * workqueue_select_cpu_near - Select a CPU based on NUMA node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)  * @node: NUMA node ID that we want to select a CPU from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)  * This function will attempt to find a "random" cpu available on a given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)  * node. If there are no CPUs available on the given node it will return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  * WORK_CPU_UNBOUND indicating that we should just schedule to any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)  * available CPU if we need to schedule this work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static int workqueue_select_cpu_near(int node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	/* No point in doing this if NUMA isn't enabled for workqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	if (!wq_numa_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		return WORK_CPU_UNBOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	/* Delay binding to CPU if node is not valid or online */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		return WORK_CPU_UNBOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	/* Use local node/cpu if we are already there */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	if (node == cpu_to_node(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	/* Use "random" otherwise known as "first" online CPU of node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	/* If CPU is valid return that, otherwise just defer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)  * queue_work_node - queue work on a "random" cpu for a given NUMA node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)  * @node: NUMA node that we are targeting the work for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  * @wq: workqueue to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)  * @work: work to queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)  * We queue the work to a "random" CPU within a given NUMA node. The basic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)  * idea here is to provide a way to somehow associate work with a given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)  * NUMA node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)  * This function will only make a best effort attempt at getting this onto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)  * the right NUMA node. If no node is requested or the requested node is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)  * offline then we just fall back to standard queue_work behavior.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  * Currently the "random" CPU ends up being the first available CPU in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)  * intersection of cpu_online_mask and the cpumask of the node, unless we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)  * are running on the node. In that case we just use the current CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)  * Return: %false if @work was already on a queue, %true otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) bool queue_work_node(int node, struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		     struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	 * This current implementation is specific to unbound workqueues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	 * Specifically we only return the first available CPU for a given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	 * node instead of cycling through individual CPUs within the node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	 * If this is used with a per-cpu workqueue then the logic in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	 * workqueue_select_cpu_near would need to be updated to allow for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	 * some round robin type logic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		int cpu = workqueue_select_cpu_near(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		__queue_work(cpu, wq, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) EXPORT_SYMBOL_GPL(queue_work_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
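/*
 * Illustrative sketch of a queue_work_node() caller; my_* names are
 * hypothetical.  The workqueue must be unbound (e.g. system_unbound_wq);
 * the node would typically come from the device being serviced, with
 * NUMA_NO_NODE falling back to plain queue_work() behaviour.
 */
#include <linux/workqueue.h>

static void my_node_fn(struct work_struct *work)
{
	/* preferably runs on a CPU of the requested node */
}
static DECLARE_WORK(my_node_work, my_node_fn);

static void my_kick_near_node(int node)
{
	/* node may be NUMA_NO_NODE; the work then goes to any CPU */
	queue_work_node(node, system_unbound_wq, &my_node_work);
}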
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) void delayed_work_timer_fn(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	struct delayed_work *dwork = from_timer(dwork, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	/* should have been called from irqsafe timer with irq already off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) EXPORT_SYMBOL(delayed_work_timer_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 				struct delayed_work *dwork, unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	struct timer_list *timer = &dwork->timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	struct work_struct *work = &dwork->work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	WARN_ON_ONCE(!wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	 * With CFI, timer->function can point to a jump table entry in a module,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	 * which fails the comparison. Disable the warning if CFI and modules are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	 * both enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	if (!IS_ENABLED(CONFIG_CFI_CLANG) || !IS_ENABLED(CONFIG_MODULES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	WARN_ON_ONCE(timer_pending(timer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	WARN_ON_ONCE(!list_empty(&work->entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	 * If @delay is 0, queue @dwork->work immediately.  This is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	 * both optimization and correctness.  The earliest @timer can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	 * expire is on the closest next tick, and delayed_work users depend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	 * on there being no such delay when @delay is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	if (!delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		__queue_work(cpu, wq, &dwork->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	dwork->wq = wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	dwork->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	timer->expires = jiffies + delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	if (unlikely(cpu != WORK_CPU_UNBOUND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		add_timer_on(timer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		add_timer(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  * queue_delayed_work_on - queue work on specific CPU after delay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)  * @cpu: CPU number to execute work on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  * @wq: workqueue to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)  * @dwork: work to queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)  * @delay: number of jiffies to wait before queueing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)  * Return: %false if @work was already on a queue, %true otherwise.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)  * @delay is zero and @dwork is idle, it will be scheduled for immediate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)  * execution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			   struct delayed_work *dwork, unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	struct work_struct *work = &dwork->work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	/* read the comment in __queue_work() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		__queue_delayed_work(cpu, wq, dwork, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) EXPORT_SYMBOL(queue_delayed_work_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
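/*
 * Illustrative sketch of a queue_delayed_work_on() caller; my_* names
 * are hypothetical.  DECLARE_DELAYED_WORK()/INIT_DELAYED_WORK() install
 * delayed_work_timer_fn() as the timer callback, so the work item is
 * queued when dwork->timer expires.  queue_delayed_work() is the
 * WORK_CPU_UNBOUND shorthand of this function.
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void my_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... poll the hardware ..., then re-arm ~100ms from now */
	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, dwork,
			      msecs_to_jiffies(100));
}
static DECLARE_DELAYED_WORK(my_poll_work, my_poll_fn);

static void my_start_polling(void)
{
	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_poll_work,
			      msecs_to_jiffies(100));
}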
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)  * @cpu: CPU number to execute work on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)  * @wq: workqueue to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)  * @dwork: work to queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)  * @delay: number of jiffies to wait before queueing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)  * modify @dwork's timer so that it expires after @delay.  If @delay is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)  * zero, @dwork is guaranteed to be scheduled immediately regardless of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)  * current state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)  * Return: %false if @dwork was idle and queued, %true if @dwork was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)  * pending and its timer was modified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)  * This function is safe to call from any context, including IRQ handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)  * See try_to_grab_pending() for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 			 struct delayed_work *dwork, unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		ret = try_to_grab_pending(&dwork->work, true, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	} while (unlikely(ret == -EAGAIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	if (likely(ret >= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		__queue_delayed_work(cpu, wq, dwork, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	/* -ENOENT from try_to_grab_pending() becomes %true */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) EXPORT_SYMBOL_GPL(mod_delayed_work_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
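/*
 * Illustrative sketch of mod_delayed_work_on(); my_* names are
 * hypothetical.  A watchdog-style timeout is pushed back every time
 * activity is seen, regardless of whether the previous timeout is still
 * pending.  mod_delayed_work() is the WORK_CPU_UNBOUND shorthand.
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void my_timeout_fn(struct work_struct *work)
{
	/* no activity seen for a full second; handle the timeout */
}
static DECLARE_DELAYED_WORK(my_timeout_work, my_timeout_fn);

static void my_note_activity(void)
{
	/* safe from any context, including IRQ handlers */
	mod_delayed_work(system_wq, &my_timeout_work, HZ);
}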
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) static void rcu_work_rcufn(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	/* read the comment in __queue_work() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)  * queue_rcu_work - queue work after a RCU grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)  * @wq: workqueue to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)  * @rwork: work to queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)  * Return: %false if @rwork was already pending, %true otherwise.  Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)  * that a full RCU grace period is guaranteed only after a %true return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)  * While @rwork is guaranteed to be executed after a %false return, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)  * execution may happen before a full RCU grace period has passed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	struct work_struct *work = &rwork->work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		rwork->wq = wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		call_rcu(&rwork->rcu, rcu_work_rcufn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) EXPORT_SYMBOL(queue_rcu_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
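/*
 * Illustrative sketch of queue_rcu_work(); my_* names are hypothetical.
 * The object is freed from process context only after a full RCU grace
 * period, which is useful when the release path may sleep and a bare
 * call_rcu() callback would not be allowed to.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
	struct rcu_work rwork;
	/* ... payload ... */
};

static void my_obj_free_fn(struct work_struct *work)
{
	struct rcu_work *rwork = to_rcu_work(work);
	struct my_obj *obj = container_of(rwork, struct my_obj, rwork);

	kfree(obj);	/* a grace period has elapsed since queueing */
}

static void my_obj_release(struct my_obj *obj)
{
	/* each object queues its own rwork exactly once, so %true is expected */
	INIT_RCU_WORK(&obj->rwork, my_obj_free_fn);
	queue_rcu_work(system_wq, &obj->rwork);
}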
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)  * worker_enter_idle - enter idle state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)  * @worker: worker which is entering idle state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)  * @worker is entering idle state.  Update stats and idle timer if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)  * necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)  * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)  * raw_spin_lock_irq(pool->lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) static void worker_enter_idle(struct worker *worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	struct worker_pool *pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 			 (worker->hentry.next || worker->hentry.pprev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	/* can't use worker_set_flags(), also called from create_worker() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	worker->flags |= WORKER_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	pool->nr_idle++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	worker->last_active = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	/* idle_list is LIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	list_add(&worker->entry, &pool->idle_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	 * Sanity check nr_running.  Because unbind_workers() releases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	 * pool->lock between setting %WORKER_UNBOUND and zapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	 * nr_running, the warning may trigger spuriously.  Check iff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	 * unbind is not in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		     pool->nr_workers == pool->nr_idle &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		     atomic_read(&pool->nr_running));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  * worker_leave_idle - leave idle state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)  * @worker: worker which is leaving idle state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)  * @worker is leaving idle state.  Update stats.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)  * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)  * raw_spin_lock_irq(pool->lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static void worker_leave_idle(struct worker *worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	struct worker_pool *pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	worker_clr_flags(worker, WORKER_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	pool->nr_idle--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	list_del_init(&worker->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) static struct worker *alloc_worker(int node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	if (worker) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		INIT_LIST_HEAD(&worker->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		INIT_LIST_HEAD(&worker->scheduled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		INIT_LIST_HEAD(&worker->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		/* on creation a worker is in !idle && prep state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		worker->flags = WORKER_PREP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	return worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)  * worker_attach_to_pool() - attach a worker to a pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)  * @worker: worker to be attached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)  * @pool: the target pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)  * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)  * cpu-binding of @worker are kept coordinated with the pool across
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)  * cpu-[un]hotplugs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) static void worker_attach_to_pool(struct worker *worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 				   struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	mutex_lock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	 * stable across this function.  See the comments above the flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	 * definition for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	if (pool->flags & POOL_DISASSOCIATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		worker->flags |= WORKER_UNBOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	if (worker->rescue_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	list_add_tail(&worker->node, &pool->workers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	worker->pool = pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	mutex_unlock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)  * worker_detach_from_pool() - detach a worker from its pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)  * @worker: worker which is attached to its pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)  * Undo the attaching which had been done in worker_attach_to_pool().  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)  * caller worker shouldn't access the pool after it is detached unless it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)  * holds another reference to the pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) static void worker_detach_from_pool(struct worker *worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	struct worker_pool *pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	struct completion *detach_completion = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	mutex_lock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	list_del(&worker->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	worker->pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	if (list_empty(&pool->workers))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		detach_completion = pool->detach_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	mutex_unlock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	/* clear leftover flags without pool->lock after it is detached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	if (detach_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		complete(detach_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)  * create_worker - create a new workqueue worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)  * @pool: pool the new worker will belong to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)  * Create and start a new worker which is attached to @pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)  * Might sleep.  Does GFP_KERNEL allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)  * Pointer to the newly created worker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) static struct worker *create_worker(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	struct worker *worker = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	int id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	char id_buf[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	/* ID is needed to determine kthread name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	if (id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	worker = alloc_worker(pool->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	if (!worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	worker->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	if (pool->cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 			 pool->attrs->nice < 0  ? "H" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 					      "kworker/%s", id_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	if (IS_ERR(worker->task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	set_user_nice(worker->task, pool->attrs->nice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	if (IS_ENABLED(CONFIG_ROCKCHIP_OPTIMIZE_RT_PRIO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		struct sched_param param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		if (pool->attrs->nice == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			param.sched_priority = MAX_RT_PRIO / 2 - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 			param.sched_priority = MAX_RT_PRIO / 2 - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		sched_setscheduler_nocheck(worker->task, SCHED_RR, &param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	kthread_bind_mask(worker->task, pool->attrs->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	/* successful, attach the worker to the pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	worker_attach_to_pool(worker, pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	/* start the newly created worker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	worker->pool->nr_workers++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	worker_enter_idle(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	wake_up_process(worker->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	return worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	if (id >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		ida_simple_remove(&pool->worker_ida, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	kfree(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)  * destroy_worker - destroy a workqueue worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)  * @worker: worker to be destroyed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)  * Destroy @worker and adjust @pool stats accordingly.  The worker should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)  * be idle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)  * raw_spin_lock_irq(pool->lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) static void destroy_worker(struct worker *worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	struct worker_pool *pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	lockdep_assert_held(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	/* sanity check frenzy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	if (WARN_ON(worker->current_work) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	    WARN_ON(!list_empty(&worker->scheduled)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	    WARN_ON(!(worker->flags & WORKER_IDLE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	pool->nr_workers--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	pool->nr_idle--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	list_del_init(&worker->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	worker->flags |= WORKER_DIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	wake_up_process(worker->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) static void idle_worker_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	struct worker_pool *pool = from_timer(pool, t, idle_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	while (too_many_workers(pool)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		unsigned long expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		/* idle_list is kept in LIFO order, check the last one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		if (time_before(jiffies, expires)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 			mod_timer(&pool->idle_timer, expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		destroy_worker(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) static void send_mayday(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	struct pool_workqueue *pwq = get_work_pwq(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	struct workqueue_struct *wq = pwq->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	lockdep_assert_held(&wq_mayday_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	if (!wq->rescuer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	/* mayday mayday mayday */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	if (list_empty(&pwq->mayday_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		 * If @pwq is for an unbound wq, its base ref may be put at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		 * any time due to an attribute change.  Pin @pwq until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		 * rescuer is done with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		get_pwq(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		list_add_tail(&pwq->mayday_node, &wq->maydays);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		wake_up_process(wq->rescuer->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) static void pool_mayday_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	struct work_struct *work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	raw_spin_lock(&wq_mayday_lock);		/* for wq->maydays */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	if (need_to_create_worker(pool)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		 * We've been trying to create a new worker but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		 * haven't been successful.  We might be hitting an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		 * allocation deadlock.  Send distress signals to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		 * rescuers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		list_for_each_entry(work, &pool->worklist, entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 			send_mayday(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	raw_spin_unlock(&wq_mayday_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)  * maybe_create_worker - create a new worker if necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)  * @pool: pool to create a new worker for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)  * Create a new worker for @pool if necessary.  @pool is guaranteed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)  * have at least one idle worker on return from this function.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)  * sent to all rescuers with works scheduled on @pool to resolve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)  * possible allocation deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  * On return, need_to_create_worker() is guaranteed to be %false and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)  * may_start_working() %true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)  * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)  * multiple times.  Does GFP_KERNEL allocations.  Called only from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)  * manager.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) static void maybe_create_worker(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) __releases(&pool->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) __acquires(&pool->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		if (create_worker(pool) || !need_to_create_worker(pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		schedule_timeout_interruptible(CREATE_COOLDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		if (!need_to_create_worker(pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	del_timer_sync(&pool->mayday_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	 * This is necessary even after a new worker was just successfully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	 * created as @pool->lock was dropped and the new worker might have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	 * already become busy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	if (need_to_create_worker(pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)  * manage_workers - manage worker pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)  * @worker: self
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)  * Assume the manager role and manage the worker pool @worker belongs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)  * to.  At any given time, there can be only zero or one manager per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)  * pool.  The exclusion is handled automatically by this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)  * The caller can safely start processing works on false return.  On
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)  * true return, it's guaranteed that need_to_create_worker() is false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)  * and may_start_working() is true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)  * multiple times.  Does GFP_KERNEL allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)  * %false if the pool doesn't need management and the caller can safely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)  * start processing works, %true if management function was performed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)  * the conditions that the caller verified before calling the function may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)  * no longer be true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) static bool manage_workers(struct worker *worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	struct worker_pool *pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	if (pool->flags & POOL_MANAGER_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	pool->flags |= POOL_MANAGER_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	pool->manager = worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	maybe_create_worker(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	pool->manager = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	pool->flags &= ~POOL_MANAGER_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	rcuwait_wake_up(&manager_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)  * process_one_work - process single work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)  * @worker: self
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)  * @work: work to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)  * Process @work.  This function contains all the logic necessary to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)  * process a single work item, including synchronization against and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)  * interaction with other workers on the same cpu, queueing and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)  * flushing.  As long as the context requirement is met, any worker can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)  * call this function to process a work item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)  * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) static void process_one_work(struct worker *worker, struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) __releases(&pool->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) __acquires(&pool->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	struct pool_workqueue *pwq = get_work_pwq(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	struct worker_pool *pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	int work_color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	struct worker *collision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) #ifdef CONFIG_LOCKDEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	 * It is permissible to free the struct work_struct from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	 * inside the function that is called from it; we need to take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	 * this into account for lockdep too.  To avoid bogus "held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	 * lock freed" warnings as well as problems when looking into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	 * work->lockdep_map, make a copy and use that here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	struct lockdep_map lockdep_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	/* ensure we're on the correct CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		     raw_smp_processor_id() != pool->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	 * A single work shouldn't be executed concurrently by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	 * multiple workers on a single cpu.  Check whether anyone is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	 * already processing the work.  If so, defer the work to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	 * currently executing one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	collision = find_worker_executing_work(pool, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	if (unlikely(collision)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		move_linked_works(work, &collision->scheduled, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	/* claim and dequeue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	debug_work_deactivate(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	worker->current_work = work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	worker->current_func = work->func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	worker->current_pwq = pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	work_color = get_work_color(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	 * Record the wq name for cmdline and debug reporting; it may get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	 * overridden through set_worker_desc().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	list_del_init(&work->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	 * CPU intensive works don't participate in concurrency management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	 * They're the scheduler's responsibility.  This takes @worker out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	 * of concurrency management and the next code block will chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	 * execution of the pending work items.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	if (unlikely(cpu_intensive))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	 * Wake up another worker if necessary.  The condition is always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	 * false for normal per-cpu workers since nr_running would always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	 * be >= 1 at this point.  This is used to chain execution of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	 * pending work items for WORKER_NOT_RUNNING workers such as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	 * UNBOUND and CPU_INTENSIVE ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	if (need_more_worker(pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		wake_up_worker(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	 * Record the last pool and clear PENDING which should be the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	 * update to @work.  Also, do this inside @pool->lock so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	 * PENDING and queued state changes happen together while IRQ is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	 * disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	set_work_pool_and_clear_pending(work, pool->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	lock_map_acquire(&pwq->wq->lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	lock_map_acquire(&lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	 * Strictly speaking we should mark the invariant state without holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	 * any locks, that is, before these two lock_map_acquire()'s.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	 * However, that would result in:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	 *   A(W1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	 *   WFC(C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	 *		A(W1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	 *		C(C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	 * Which would create W1->C->W1 dependencies, even though there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	 * actual deadlock possible. There are two solutions: use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	 * read-recursive acquire on the work(queue) 'locks', which will then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	 * hit the lockdep limitation on recursive locks, or simply discard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	 * these locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	 * AFAICT there is no possible deadlock scenario between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	 * flush_work() and complete() primitives (except for single-threaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	 * workqueues), so hiding them isn't a problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	lockdep_invariant_state(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	trace_workqueue_execute_start(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	worker->current_func(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	 * While we must be careful to not use "work" after this, the trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	 * point will only record its address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	trace_workqueue_execute_end(work, worker->current_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	lock_map_release(&lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	lock_map_release(&pwq->wq->lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		       "     last function: %ps\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		       current->comm, preempt_count(), task_pid_nr(current),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		       worker->current_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 		debug_show_held_locks(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	 * The following prevents a kworker from hogging CPU on !PREEMPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	 * kernels, where a requeueing work item waiting for something to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	 * happen could deadlock with stop_machine as such work item could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	 * indefinitely requeue itself while all other CPUs are trapped in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	 * stop_machine. At the same time, report a quiescent RCU state so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	 * the same condition doesn't freeze RCU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	/* clear cpu intensive status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	if (unlikely(cpu_intensive))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	/* tag the worker for identification in schedule() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	worker->last_func = worker->current_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	/* we're done with it, release */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	hash_del(&worker->hentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	worker->current_work = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	worker->current_func = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	worker->current_pwq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	pwq_dec_nr_in_flight(pwq, work_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) }
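
/*
 * Editorial illustration (not part of the original file): a minimal,
 * hypothetical sketch of the producer side that ultimately feeds
 * process_one_work().  A work item is initialized with its callback and
 * queued; a pool worker later runs the callback in process context.  The
 * names example_work and example_work_fn are assumptions made for this
 * example only; the block is guarded by #if 0 so it is never built.
 */
#if 0
#include <linux/printk.h>
#include <linux/workqueue.h>

/* callback executed by a pool worker via process_one_work() */
static void example_work_fn(struct work_struct *work)
{
	pr_info("example work item executed in process context\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_queue(void)
{
	/* queue on the system workqueue; a shared pool worker picks it up */
	schedule_work(&example_work);
}
#endif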
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)  * process_scheduled_works - process scheduled works
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)  * @worker: self
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)  * Process all scheduled works.  Please note that the scheduled list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)  * may change while processing a work, so this function repeatedly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)  * fetches a work from the top and executes it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)  * multiple times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) static void process_scheduled_works(struct worker *worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	while (!list_empty(&worker->scheduled)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		struct work_struct *work = list_first_entry(&worker->scheduled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 						struct work_struct, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		process_one_work(worker, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) static void set_pf_worker(bool val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	mutex_lock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		current->flags |= PF_WQ_WORKER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		current->flags &= ~PF_WQ_WORKER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	mutex_unlock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)  * worker_thread - the worker thread function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)  * @__worker: self
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)  * The worker thread function.  All workers belong to a worker_pool -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)  * either a per-cpu one or a dynamic unbound one.  These workers process all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)  * work items regardless of their specific target workqueue.  The only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)  * exception is work items which belong to workqueues with a rescuer, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)  * explained in rescuer_thread().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)  * Return: 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) static int worker_thread(void *__worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	struct worker *worker = __worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	struct worker_pool *pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	/* tell the scheduler that this is a workqueue worker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	set_pf_worker(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) woke_up:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	/* am I supposed to die? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	if (unlikely(worker->flags & WORKER_DIE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		WARN_ON_ONCE(!list_empty(&worker->entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		set_pf_worker(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		set_task_comm(worker->task, "kworker/dying");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 		ida_simple_remove(&pool->worker_ida, worker->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 		worker_detach_from_pool(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		kfree(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	worker_leave_idle(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) recheck:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	/* no more worker necessary? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	if (!need_more_worker(pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		goto sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	/* do we need to manage? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		goto recheck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	 * ->scheduled list can only be filled while a worker is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	 * preparing to process a work or actually processing it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	 * Make sure nobody diddled with it while I was sleeping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	WARN_ON_ONCE(!list_empty(&worker->scheduled));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	 * Finish PREP stage.  We're guaranteed to have at least one idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	 * worker or that someone else has already assumed the manager
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	 * role.  This is where @worker starts participating in concurrency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	 * management if applicable and concurrency management is restored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	 * after being rebound.  See rebind_workers() for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		struct work_struct *work =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 			list_first_entry(&pool->worklist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 					 struct work_struct, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		pool->watchdog_ts = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 			/* optimization path, not strictly necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			process_one_work(worker, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 			if (unlikely(!list_empty(&worker->scheduled)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 				process_scheduled_works(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 			move_linked_works(work, &worker->scheduled, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 			process_scheduled_works(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	} while (keep_working(pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	worker_set_flags(worker, WORKER_PREP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) sleep:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	 * pool->lock is held and there's no work to process and no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	 * manage, sleep.  Workers are woken up only while holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	 * pool->lock or from local cpu, so setting the current state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	 * before releasing pool->lock is enough to prevent losing any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	 * event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	worker_enter_idle(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	__set_current_state(TASK_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	goto woke_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
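
/*
 * Editorial illustration (not part of the original file): work items reach
 * worker_thread() through workqueues served either by the per-cpu pools or,
 * when WQ_UNBOUND is set, by dynamically managed unbound pools.  A
 * hypothetical allocation of each kind, guarded by #if 0 so it is never
 * built; the names and error handling are assumptions for this example only.
 */
#if 0
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_percpu_wq;	/* per-cpu pools */
static struct workqueue_struct *example_unbound_wq;	/* unbound pools */

static int example_setup(void)
{
	example_percpu_wq = alloc_workqueue("example_percpu", 0, 0);
	if (!example_percpu_wq)
		return -ENOMEM;

	example_unbound_wq = alloc_workqueue("example_unbound", WQ_UNBOUND, 0);
	if (!example_unbound_wq) {
		destroy_workqueue(example_percpu_wq);
		return -ENOMEM;
	}
	return 0;
}
#endif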
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)  * rescuer_thread - the rescuer thread function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)  * @__rescuer: self
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)  * Workqueue rescuer thread function.  There's one rescuer for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)  * workqueue which has WQ_MEM_RECLAIM set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)  * Regular work processing on a pool may block trying to create a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)  * worker, which uses a GFP_KERNEL allocation and thus has a slight chance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)  * of developing into a deadlock if some work items currently on the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)  * queue need to be processed to satisfy the GFP_KERNEL allocation.  This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)  * is the problem the rescuer solves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)  * When such a condition is possible, the pool summons the rescuers of all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)  * workqueues which have work items queued on the pool and lets them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)  * process those items so that forward progress can be guaranteed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)  * This should happen rarely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)  * Return: 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) static int rescuer_thread(void *__rescuer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	struct worker *rescuer = __rescuer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	struct workqueue_struct *wq = rescuer->rescue_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	struct list_head *scheduled = &rescuer->scheduled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	bool should_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	set_user_nice(current, RESCUER_NICE_LEVEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	 * doesn't participate in concurrency management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	set_pf_worker(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	set_current_state(TASK_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	 * By the time the rescuer is requested to stop, the workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	 * shouldn't have any work pending, but @wq->maydays may still have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	 * pwq(s) queued.  This can happen if non-rescuer workers consume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	 * all the work items before the rescuer gets to them.  Go through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	 * @wq->maydays processing before acting on should_stop so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	 * list is always empty on exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	should_stop = kthread_should_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	/* see whether any pwq is asking for help */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	raw_spin_lock_irq(&wq_mayday_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	while (!list_empty(&wq->maydays)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 					struct pool_workqueue, mayday_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 		struct worker_pool *pool = pwq->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		struct work_struct *work, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		bool first = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 		list_del_init(&pwq->mayday_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		raw_spin_unlock_irq(&wq_mayday_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 		worker_attach_to_pool(rescuer, pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 		raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		 * Slurp in all works issued via this workqueue and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		 * process'em.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		WARN_ON_ONCE(!list_empty(scheduled));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 			if (get_work_pwq(work) == pwq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 				if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 					pool->watchdog_ts = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 				move_linked_works(work, scheduled, &n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 			first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		if (!list_empty(scheduled)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 			process_scheduled_works(rescuer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 			 * The above execution of rescued work items could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 			 * have created more to rescue through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 			 * pwq_activate_first_delayed() or chained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 			 * queueing.  Let's put @pwq back on the mayday list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 			 * so that such back-to-back work items, which may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 			 * being used to relieve memory pressure, don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 			 * incur a MAYDAY_INTERVAL delay in between.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 			if (pwq->nr_active && need_to_create_worker(pool)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 				raw_spin_lock(&wq_mayday_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 				 * Queue iff we aren't racing destruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 				 * and somebody else hasn't queued it already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 				if (wq->rescuer && list_empty(&pwq->mayday_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 					get_pwq(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 					list_add_tail(&pwq->mayday_node, &wq->maydays);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 				raw_spin_unlock(&wq_mayday_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 		 * Put the reference grabbed by send_mayday().  @pool won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		 * go away while we're still attached to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		put_pwq(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 		 * Leave this pool.  If need_more_worker() is %true, notify a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 		 * regular worker; otherwise, we end up with zero concurrency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		 * and stall execution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 		if (need_more_worker(pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 			wake_up_worker(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		worker_detach_from_pool(rescuer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 		raw_spin_lock_irq(&wq_mayday_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	raw_spin_unlock_irq(&wq_mayday_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	if (should_stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		set_pf_worker(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	/* rescuers should never participate in concurrency management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
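
/*
 * Editorial illustration (not part of the original file): a rescuer only
 * exists for workqueues created with WQ_MEM_RECLAIM; alloc_workqueue()
 * spawns a dedicated task running rescuer_thread() above for such
 * workqueues.  The names below are assumptions for this example only and
 * the block is guarded by #if 0 so it is never built.
 */
#if 0
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_reclaim_wq;

static int example_reclaim_init(void)
{
	/* WQ_MEM_RECLAIM causes a rescuer thread to be created */
	example_reclaim_wq = alloc_workqueue("example_reclaim",
					     WQ_MEM_RECLAIM, 0);
	return example_reclaim_wq ? 0 : -ENOMEM;
}

static void example_reclaim_exit(void)
{
	/* drains the workqueue and stops the rescuer */
	destroy_workqueue(example_reclaim_wq);
}
#endif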
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)  * check_flush_dependency - check for flush dependency sanity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)  * @target_wq: workqueue being flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)  * @target_work: work item being flushed (NULL for workqueue flushes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)  * %current is trying to flush the whole @target_wq or @target_work on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)  * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)  * reclaiming memory or running on a workqueue which doesn't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)  * %WQ_MEM_RECLAIM, as that can break the forward-progress guarantee and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)  * lead to a deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) static void check_flush_dependency(struct workqueue_struct *target_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 				   struct work_struct *target_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	work_func_t target_func = target_work ? target_work->func : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	if (target_wq->flags & WQ_MEM_RECLAIM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	worker = current_wq_worker();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	WARN_ONCE(current->flags & PF_MEMALLOC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		  current->pid, current->comm, target_wq->name, target_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		  "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		  worker->current_pwq->wq->name, worker->current_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 		  target_wq->name, target_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
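
/*
 * Editorial illustration (not part of the original file): the dependency
 * that check_flush_dependency() warns about, seen from the caller's side.
 * A work item running on a WQ_MEM_RECLAIM workqueue must not wait on a
 * workqueue without WQ_MEM_RECLAIM, because the latter has no rescuer and
 * may fail to make progress under memory pressure.  Names are assumptions
 * for this example only; guarded by #if 0 so it is never built.
 */
#if 0
static struct workqueue_struct *example_reclaim_wq;	/* WQ_MEM_RECLAIM */
static struct workqueue_struct *example_plain_wq;	/* no WQ_MEM_RECLAIM */

static void example_reclaim_work_fn(struct work_struct *work)
{
	/*
	 * BAD: flushing a !WQ_MEM_RECLAIM workqueue from a WQ_MEM_RECLAIM
	 * work item trips the WARN_ONCE() in check_flush_dependency().
	 */
	flush_workqueue(example_plain_wq);
}
#endif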
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) struct wq_barrier {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	struct work_struct	work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	struct completion	done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	struct task_struct	*task;	/* purely informational */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) static void wq_barrier_func(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	complete(&barr->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)  * insert_wq_barrier - insert a barrier work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)  * @pwq: pwq to insert barrier into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)  * @barr: wq_barrier to insert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)  * @target: target work to attach @barr to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)  * @worker: worker currently executing @target, NULL if @target is not executing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)  * @barr is linked to @target such that @barr is completed only after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)  * @target finishes execution.  Please note that the ordering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)  * guarantee is observed only with respect to @target and on the local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)  * cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)  * Currently, a queued barrier can't be canceled.  This is because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)  * try_to_grab_pending() can't determine whether the work to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)  * grabbed is at the head of the queue and thus can't clear the LINKED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)  * flag of the previous work, while there must be a valid next work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)  * after a work with the LINKED flag set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)  * Note that when @worker is non-NULL, @target may be modified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)  * underneath us, so we can't reliably determine pwq from @target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)  * raw_spin_lock_irq(pool->lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) static void insert_wq_barrier(struct pool_workqueue *pwq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 			      struct wq_barrier *barr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 			      struct work_struct *target, struct worker *worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	struct list_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	unsigned int linked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	 * debugobject calls are safe here even with pool->lock locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	 * as we know for sure that this will not trigger any of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	 * checks and call back into the fixup functions where we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	 * might deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	init_completion_map(&barr->done, &target->lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	barr->task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	 * If @target is currently being executed, schedule the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	 * barrier to the worker; otherwise, put it after @target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	if (worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		head = worker->scheduled.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 		unsigned long *bits = work_data_bits(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		head = target->entry.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		/* there can already be other linked works, inherit and set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		linked = *bits & WORK_STRUCT_LINKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	debug_work_activate(&barr->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	insert_work(pwq, &barr->work, head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		    work_color_to_flags(WORK_NO_COLOR) | linked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) }
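
/*
 * Editorial illustration (not part of the original file): the barrier above
 * is the mechanism behind flush_work().  A hypothetical caller-side sketch,
 * guarded by #if 0 so it is never built.
 */
#if 0
static void example_wait_for(struct work_struct *w)
{
	/*
	 * flush_work() inserts a wq_barrier after @w (or onto the worker
	 * currently executing it) and sleeps on the barrier's completion
	 * until @w has finished.
	 */
	flush_work(w);
}
#endif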
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)  * @wq: workqueue being flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)  * @flush_color: new flush color, < 0 for no-op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)  * @work_color: new work color, < 0 for no-op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)  * Prepare pwqs for workqueue flushing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)  * If @flush_color is non-negative, flush_color on all pwqs should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)  * -1.  If no pwq has in-flight commands at the specified color, all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)  * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)  * has in flight commands, its pwq->flush_color is set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)  * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)  * wakeup logic is armed and %true is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)  * The caller should have initialized @wq->first_flusher prior to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)  * calling this function with non-negative @flush_color.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)  * @flush_color is negative, no flush color update is done and %false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)  * is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)  * If @work_color is non-negative, all pwqs should have the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)  * work_color, which is the color previous to @work_color, and all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)  * will be advanced to @work_color.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)  * mutex_lock(wq->mutex).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)  * %true if @flush_color >= 0 and there's something to flush.  %false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)  * otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 				      int flush_color, int work_color)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	bool wait = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	if (flush_color >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 		atomic_set(&wq->nr_pwqs_to_flush, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	for_each_pwq(pwq, wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		struct worker_pool *pool = pwq->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		if (flush_color >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 			WARN_ON_ONCE(pwq->flush_color != -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 			if (pwq->nr_in_flight[flush_color]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 				pwq->flush_color = flush_color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 				atomic_inc(&wq->nr_pwqs_to_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 				wait = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 		if (work_color >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 			pwq->work_color = work_color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 		raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		complete(&wq->first_flusher->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	return wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)  * flush_workqueue - ensure that any scheduled work has run to completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)  * @wq: workqueue to flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)  * This function sleeps until all work items which were queued on entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)  * have finished execution, but it is not livelocked by new incoming ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) void flush_workqueue(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	struct wq_flusher this_flusher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		.list = LIST_HEAD_INIT(this_flusher.list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 		.flush_color = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	int next_color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	if (WARN_ON(!wq_online))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	lock_map_acquire(&wq->lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	lock_map_release(&wq->lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	 * Start-to-wait phase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	next_color = work_next_color(wq->work_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	if (next_color != wq->flush_color) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		 * Color space is not full.  The current work_color
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		 * becomes our flush_color and work_color is advanced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 		 * by one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 		this_flusher.flush_color = wq->work_color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		wq->work_color = next_color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 		if (!wq->first_flusher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 			/* no flush in progress, become the first flusher */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 			wq->first_flusher = &this_flusher;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 						       wq->work_color)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 				/* nothing to flush, done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 				wq->flush_color = next_color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 				wq->first_flusher = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 				goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 			/* wait in queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		 * Oops, color space is full, wait on overflow queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 		 * The next flush completion will assign us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 		 * flush_color and transfer to flusher_queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	check_flush_dependency(wq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	wait_for_completion(&this_flusher.done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	 * Wake-up-and-cascade phase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	 * First flushers are responsible for cascading flushes and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	 * handling overflow.  Non-first flushers can simply return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	if (READ_ONCE(wq->first_flusher) != &this_flusher)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	/* we might have raced, check again with mutex held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	if (wq->first_flusher != &this_flusher)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	WRITE_ONCE(wq->first_flusher, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	WARN_ON_ONCE(!list_empty(&this_flusher.list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		struct wq_flusher *next, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 		/* complete all the flushers sharing the current flush color */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 			if (next->flush_color != wq->flush_color)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 			list_del_init(&next->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 			complete(&next->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 			     wq->flush_color != work_next_color(wq->work_color));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 		/* this flush_color is finished, advance by one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 		wq->flush_color = work_next_color(wq->flush_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		/* one color has been freed, handle overflow queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 		if (!list_empty(&wq->flusher_overflow)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 			 * Assign the same color to all overflowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 			 * flushers, advance work_color and append to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 			 * flusher_queue.  This is the start-to-wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 			 * phase for these overflowed flushers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 				tmp->flush_color = wq->work_color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 			wq->work_color = work_next_color(wq->work_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 			list_splice_tail_init(&wq->flusher_overflow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 					      &wq->flusher_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		if (list_empty(&wq->flusher_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 			WARN_ON_ONCE(wq->flush_color != wq->work_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 		 * Need to flush more colors.  Make the next flusher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 		 * the new first flusher and arm pwqs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 		WARN_ON_ONCE(wq->flush_color == wq->work_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 		WARN_ON_ONCE(wq->flush_color != next->flush_color);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 		list_del_init(&next->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 		wq->first_flusher = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 		 * Meh... this color is already done, clear first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 		 * flusher and repeat cascading.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 		wq->first_flusher = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) EXPORT_SYMBOL(flush_workqueue);
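
/*
 * Editorial illustration (not part of the original file): typical use of
 * flush_workqueue() by the owner of a workqueue, e.g. before tearing down
 * state that queued work items may still reference.  The function name is
 * an assumption for this example only; guarded by #if 0 so it is never
 * built.
 */
#if 0
static void example_quiesce(struct workqueue_struct *wq)
{
	/*
	 * Waits for every work item queued on @wq before this call; items
	 * queued afterwards are not waited for, so the flush cannot be
	 * livelocked by continuous requeueing.
	 */
	flush_workqueue(wq);
}
#endif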
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)  * drain_workqueue - drain a workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)  * @wq: workqueue to drain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)  * Wait until the workqueue becomes empty.  While draining is in progress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)  * only chain queueing is allowed.  IOW, only currently pending or running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)  * work items on @wq can queue further work items on it.  @wq is flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)  * repeatedly until it becomes empty.  The number of flushes is determined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)  * by the depth of chaining and should be relatively short.  Whine if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965)  * takes too long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) void drain_workqueue(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	unsigned int flush_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	 * __queue_work() needs to test whether there are drainers; it is much
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	 * hotter than drain_workqueue() and already looks at @wq->flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	 * Use __WQ_DRAINING so that queueing doesn't have to check nr_drainers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	if (!wq->nr_drainers++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 		wq->flags |= __WQ_DRAINING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) reflush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	flush_workqueue(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 	for_each_pwq(pwq, wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 		bool drained;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 		raw_spin_lock_irq(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 		raw_spin_unlock_irq(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 		if (drained)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 		if (++flush_cnt == 10 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 			pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 				wq->name, flush_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 		mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 		goto reflush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 	if (!--wq->nr_drainers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 		wq->flags &= ~__WQ_DRAINING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) EXPORT_SYMBOL_GPL(drain_workqueue);
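
/*
 * Usage sketch (hypothetical, illustrative names only): a work item that
 * re-queues itself a bounded number of times is quiesced with
 * drain_workqueue() before the queue is torn down.  Chain queueing from
 * within the running work item is allowed while the drain is in progress.
 * destroy_workqueue() drains internally as well; the explicit call here
 * just makes the quiescing step visible.
 *
 *	static struct workqueue_struct *my_wq;
 *	static atomic_t my_retries = ATOMIC_INIT(3);
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		if (atomic_dec_return(&my_retries) > 0)
 *			queue_work(my_wq, work);
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	static void my_teardown(void)
 *	{
 *		drain_workqueue(my_wq);
 *		destroy_workqueue(my_wq);
 *	}
 */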
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 			     bool from_cancel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	struct worker *worker = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	pool = get_work_pool(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	if (!pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	/* see the comment in try_to_grab_pending() with the same code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	pwq = get_work_pwq(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	if (pwq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 		if (unlikely(pwq->pool != pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 			goto already_gone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 		worker = find_worker_executing_work(pool, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 		if (!worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 			goto already_gone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 		pwq = worker->current_pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	check_flush_dependency(pwq->wq, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	insert_wq_barrier(pwq, barr, work, worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	 * Force a lock recursion deadlock when using flush_work() inside a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	 * single-threaded or rescuer equipped workqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	 * For single threaded workqueues the deadlock happens when the flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	 * work item is queued after the one issuing the flush_work(). For rescuer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	 * equipped workqueues the deadlock happens when the rescuer stalls, blocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	 * forward progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	if (!from_cancel &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 		lock_map_acquire(&pwq->wq->lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 		lock_map_release(&pwq->wq->lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) already_gone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) static bool __flush_work(struct work_struct *work, bool from_cancel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 	struct wq_barrier barr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	if (WARN_ON(!wq_online))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 	if (WARN_ON(!work->func))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	if (!from_cancel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		lock_map_acquire(&work->lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 		lock_map_release(&work->lockdep_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 	if (start_flush_work(work, &barr, from_cancel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 		wait_for_completion(&barr.done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 		destroy_work_on_stack(&barr.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)  * flush_work - wait for a work to finish executing the last queueing instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)  * @work: the work to flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)  * Wait until @work has finished execution.  @work is guaranteed to be idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)  * on return if it hasn't been requeued since flush started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)  * %true if flush_work() waited for the work to finish execution,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)  * %false if it was already idle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) bool flush_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	return __flush_work(work, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) EXPORT_SYMBOL_GPL(flush_work);
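
/*
 * Usage sketch (hypothetical names): wait for the last queued instance of a
 * work item to finish before freeing the data its callback uses.  If the
 * work may be re-queued concurrently or must not run again at all,
 * cancel_work_sync() below is usually the better choice.
 *
 *	struct my_dev {
 *		struct work_struct work;
 *		void *buf;
 *	};
 *
 *	static void my_dev_release(struct my_dev *dev)
 *	{
 *		flush_work(&dev->work);
 *		kfree(dev->buf);
 *		kfree(dev);
 *	}
 */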
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) struct cwt_wait {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	wait_queue_entry_t		wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	struct work_struct	*work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	if (cwait->work != key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	return autoremove_wake_function(wait, mode, sync, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 		ret = try_to_grab_pending(work, is_dwork, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 		 * If someone else is already canceling, wait for it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 		 * finish.  flush_work() doesn't work for PREEMPT_NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 		 * because we may get scheduled between @work's completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 		 * and the other canceling task resuming and clearing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 		 * CANCELING - flush_work() will return false immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 		 * as @work is no longer busy, try_to_grab_pending() will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 		 * return -ENOENT as @work is still being canceled and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 		 * other canceling task won't be able to clear CANCELING as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		 * we're hogging the CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 		 * Let's wait for completion using a waitqueue.  As this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 		 * may lead to the thundering herd problem, use a custom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		 * wake function which matches @work along with exclusive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		 * wait and wakeup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 		if (unlikely(ret == -ENOENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 			struct cwt_wait cwait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 			init_wait(&cwait.wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 			cwait.wait.func = cwt_wakefn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 			cwait.work = work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 						  TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 			if (work_is_canceling(work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 				schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 			finish_wait(&cancel_waitq, &cwait.wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	} while (unlikely(ret < 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	/* tell other tasks trying to grab @work to back off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 	mark_work_canceling(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	 * This allows canceling during early boot.  We know that @work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	 * isn't executing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	if (wq_online)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 		__flush_work(work, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	clear_work_data(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	 * Paired with prepare_to_wait_exclusive() above so that either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	 * waitqueue_active() is visible here or !work_is_canceling() is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 	 * visible there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 	smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	if (waitqueue_active(&cancel_waitq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)  * cancel_work_sync - cancel a work and wait for it to finish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)  * @work: the work to cancel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)  * Cancel @work and wait for its execution to finish.  This function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)  * can be used even if the work re-queues itself or migrates to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)  * another workqueue.  On return from this function, @work is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)  * guaranteed to be not pending or executing on any CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)  * cancel_work_sync(&delayed_work->work) must not be used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)  * delayed_work items.  Use cancel_delayed_work_sync() instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198)  * The caller must ensure that the workqueue on which @work was last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)  * queued can't be destroyed before this function returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)  * %true if @work was pending, %false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) bool cancel_work_sync(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 	return __cancel_work_timer(work, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) EXPORT_SYMBOL_GPL(cancel_work_sync);
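
/*
 * Usage sketch (hypothetical names): typical teardown - after the call the
 * work item is neither pending nor running anywhere, so the embedding
 * object can be freed.  The workqueue it was queued on must still exist,
 * as noted above.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->work);
 *		kfree(dev);
 *	}
 */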
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)  * flush_delayed_work - wait for a dwork to finish executing the last queueing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212)  * @dwork: the delayed work to flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214)  * Delayed timer is cancelled and the pending work is queued for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)  * immediate execution.  Like flush_work(), this function only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216)  * considers the last queueing instance of @dwork.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)  * %true if flush_work() waited for the work to finish execution,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)  * %false if it was already idle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) bool flush_delayed_work(struct delayed_work *dwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 	local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	if (del_timer_sync(&dwork->timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	return flush_work(&dwork->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) EXPORT_SYMBOL(flush_delayed_work);
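
/*
 * Usage sketch (hypothetical names): force a deferred statistics refresh to
 * run immediately instead of waiting for its timer, then wait for it,
 * assuming my_stats_dwork was armed elsewhere with schedule_delayed_work().
 *
 *	static void my_stats_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_stats_dwork, my_stats_fn);
 *
 *	static void my_stats_sync(void)
 *	{
 *		flush_delayed_work(&my_stats_dwork);
 *	}
 */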
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)  * flush_rcu_work - wait for a rwork to finish executing the last queueing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)  * @rwork: the rcu work to flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)  * %true if flush_rcu_work() waited for the work to finish execution,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)  * %false if it was already idle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) bool flush_rcu_work(struct rcu_work *rwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		rcu_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 		flush_work(&rwork->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 		return flush_work(&rwork->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) EXPORT_SYMBOL(flush_rcu_work);
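
/*
 * Usage sketch (hypothetical names): after queue_rcu_work(), wait until both
 * the RCU grace period and the work execution have completed.
 *
 *	static void my_reclaim_fn(struct work_struct *work);
 *	static struct rcu_work my_rwork;
 *
 *	static void my_start_reclaim(void)
 *	{
 *		INIT_RCU_WORK(&my_rwork, my_reclaim_fn);
 *		queue_rcu_work(system_wq, &my_rwork);
 *	}
 *
 *	static void my_wait_reclaim(void)
 *	{
 *		flush_rcu_work(&my_rwork);
 *	}
 */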
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) static bool __cancel_work(struct work_struct *work, bool is_dwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 		ret = try_to_grab_pending(work, is_dwork, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	} while (unlikely(ret == -EAGAIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 	if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)  * cancel_delayed_work - cancel a delayed work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)  * @dwork: delayed_work to cancel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)  * Kill off a pending delayed_work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275)  * Return: %true if @dwork was pending and canceled; %false if it wasn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)  * pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278)  * Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279)  * The work callback function may still be running on return, unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280)  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)  * use cancel_delayed_work_sync() to wait on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283)  * This function is safe to call from any context including IRQ handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) bool cancel_delayed_work(struct delayed_work *dwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	return __cancel_work(&dwork->work, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) EXPORT_SYMBOL(cancel_delayed_work);
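
/*
 * Usage sketch (hypothetical names): stop a pending timeout from an
 * interrupt handler without sleeping.  Because the callback may still be
 * running when this returns, the remove path uses the _sync() variant
 * further down.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		cancel_delayed_work(&dev->timeout_dwork);
 *		return IRQ_HANDLED;
 *	}
 */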
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292)  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)  * @dwork: the delayed work to cancel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)  * This is cancel_work_sync() for delayed works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)  * %true if @dwork was pending, %false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) bool cancel_delayed_work_sync(struct delayed_work *dwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	return __cancel_work_timer(&dwork->work, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) EXPORT_SYMBOL(cancel_delayed_work_sync);
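
/*
 * Usage sketch (hypothetical names): stop a self-rearming poll loop.  The
 * cancel also waits for a running callback and blocks re-queueing while it
 * does so, so the re-arm inside the callback cannot race with the teardown.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  poll_dwork.work);
 *
 *		my_poll_hw(dev);
 *		schedule_delayed_work(&dev->poll_dwork, HZ);
 *	}
 *
 *	static void my_stop_polling(struct my_dev *dev)
 *	{
 *		cancel_delayed_work_sync(&dev->poll_dwork);
 *	}
 */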
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307)  * schedule_on_each_cpu - execute a function synchronously on each online CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)  * @func: the function to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310)  * schedule_on_each_cpu() executes @func on each online CPU using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311)  * system workqueue and blocks until all CPUs have completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)  * schedule_on_each_cpu() is very slow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)  * 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) int schedule_on_each_cpu(work_func_t func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	struct work_struct __percpu *works;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	works = alloc_percpu(struct work_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	if (!works)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 	get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 	for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 		struct work_struct *work = per_cpu_ptr(works, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 		INIT_WORK(work, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 		schedule_work_on(cpu, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	for_each_online_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		flush_work(per_cpu_ptr(works, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	free_percpu(works);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) }
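
/*
 * Usage sketch (hypothetical names): drain a per-CPU cache on every online
 * CPU and return only once all of them have finished.  As noted above this
 * is slow, so it only suits rare, heavyweight operations.
 *
 *	static void my_drain_cpu_cache(struct work_struct *unused)
 *	{
 *		my_flush_this_cpu();
 *	}
 *
 *	static int my_drain_all_cpus(void)
 *	{
 *		return schedule_on_each_cpu(my_drain_cpu_cache);
 *	}
 */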
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344)  * execute_in_process_context - reliably execute the routine with user context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)  * @fn:		the function to execute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346)  * @ew:		guaranteed storage for the execute work structure (must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347)  *		be available when the work executes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)  * Executes the function immediately if process context is available,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350)  * otherwise schedules the function for delayed execution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352)  * Return:	0 - function was executed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353)  *		1 - function was scheduled for execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) int execute_in_process_context(work_func_t fn, struct execute_work *ew)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 	if (!in_interrupt()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 		fn(&ew->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 	INIT_WORK(&ew->work, fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	schedule_work(&ew->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) EXPORT_SYMBOL_GPL(execute_in_process_context);
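
/*
 * Usage sketch (hypothetical names): a release path that may be reached from
 * either process or interrupt context.  @ew is embedded in the object being
 * released, so its storage remains valid until the deferred work runs.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *	};
 *
 *	static void my_obj_free(struct work_struct *work)
 *	{
 *		kfree(container_of(work, struct my_obj, ew.work));
 *	}
 *
 *	static void my_obj_destroy(struct my_obj *obj)
 *	{
 *		execute_in_process_context(my_obj_free, &obj->ew);
 *	}
 */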
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)  * free_workqueue_attrs - free a workqueue_attrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371)  * @attrs: workqueue_attrs to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)  * Undo alloc_workqueue_attrs().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) void free_workqueue_attrs(struct workqueue_attrs *attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 	if (attrs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 		free_cpumask_var(attrs->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 		kfree(attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384)  * alloc_workqueue_attrs - allocate a workqueue_attrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)  * Allocate a new workqueue_attrs, initialize with default settings and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)  * return it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)  * Return: The allocated new workqueue_attrs on success. %NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) struct workqueue_attrs *alloc_workqueue_attrs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 	struct workqueue_attrs *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 	if (!attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 	if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	cpumask_copy(attrs->cpumask, cpu_possible_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	return attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 	free_workqueue_attrs(attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) }
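
/*
 * Usage sketch (hypothetical names, in-kernel caller): pair
 * alloc_workqueue_attrs()/free_workqueue_attrs() around
 * apply_workqueue_attrs() to pin an unbound workqueue to a CPU subset,
 * broadly the pattern the workqueue sysfs store handlers follow.
 *
 *	struct workqueue_attrs *attrs;
 *	int ret = -ENOMEM;
 *
 *	attrs = alloc_workqueue_attrs();
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, my_cpumask);
 *		ret = apply_workqueue_attrs(my_unbound_wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */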
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) static void copy_workqueue_attrs(struct workqueue_attrs *to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 				 const struct workqueue_attrs *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	to->nice = from->nice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	cpumask_copy(to->cpumask, from->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	 * Unlike hash and equality test, this function doesn't ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	 * ->no_numa as it is used for both pool and wq attrs.  Instead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 	 * get_unbound_pool() explicitly clears ->no_numa after copying.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 	to->no_numa = from->no_numa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) /* hash value of the content of @attrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	u32 hash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	hash = jhash_1word(attrs->nice, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	hash = jhash(cpumask_bits(attrs->cpumask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 	return hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) /* content equality test */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) static bool wqattrs_equal(const struct workqueue_attrs *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 			  const struct workqueue_attrs *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	if (a->nice != b->nice)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	if (!cpumask_equal(a->cpumask, b->cpumask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)  * init_worker_pool - initialize a newly zalloc'd worker_pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445)  * @pool: worker_pool to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447)  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)  * Return: 0 on success, -errno on failure.  Even on failure, all fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450)  * inside @pool proper are initialized and put_unbound_pool() can be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)  * on @pool safely to release it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) static int init_worker_pool(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 	raw_spin_lock_init(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	pool->id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	pool->cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 	pool->node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 	pool->flags |= POOL_DISASSOCIATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	pool->watchdog_ts = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 	INIT_LIST_HEAD(&pool->worklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 	INIT_LIST_HEAD(&pool->idle_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	hash_init(pool->busy_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 	timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 	INIT_LIST_HEAD(&pool->workers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 	ida_init(&pool->worker_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	INIT_HLIST_NODE(&pool->hash_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	pool->refcnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	/* shouldn't fail above this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 	pool->attrs = alloc_workqueue_attrs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 	if (!pool->attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) #ifdef CONFIG_LOCKDEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) static void wq_init_lockdep(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	char *lock_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	lockdep_register_key(&wq->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	if (!lock_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 		lock_name = wq->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	wq->lock_name = lock_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) static void wq_unregister_lockdep(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	lockdep_unregister_key(&wq->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) static void wq_free_lockdep(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	if (wq->lock_name != wq->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 		kfree(wq->lock_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) static void wq_init_lockdep(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) static void wq_unregister_lockdep(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) static void wq_free_lockdep(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) static void rcu_free_wq(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	struct workqueue_struct *wq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 		container_of(rcu, struct workqueue_struct, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	wq_free_lockdep(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	if (!(wq->flags & WQ_UNBOUND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 		free_percpu(wq->cpu_pwqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 		free_workqueue_attrs(wq->unbound_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	kfree(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) static void rcu_free_pool(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	ida_destroy(&pool->worker_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	free_workqueue_attrs(pool->attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	kfree(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) /* This returns with the lock held on success (pool manager is inactive). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) static bool wq_manager_inactive(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 	if (pool->flags & POOL_MANAGER_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 		raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557)  * put_unbound_pool - put a worker_pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)  * @pool: worker_pool to put
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)  * Put @pool.  If its refcnt reaches zero, it gets destroyed in an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561)  * RCU-safe manner.  get_unbound_pool() calls this function on its failure path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562)  * and this function should be able to release pools which went through,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563)  * successfully or not, init_worker_pool().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565)  * Should be called with wq_pool_mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) static void put_unbound_pool(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	DECLARE_COMPLETION_ONSTACK(detach_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	lockdep_assert_held(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	if (--pool->refcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	/* sanity checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	if (WARN_ON(!(pool->cpu < 0)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	    WARN_ON(!list_empty(&pool->worklist)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	/* release id and unhash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	if (pool->id >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 		idr_remove(&worker_pool_idr, pool->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	hash_del(&pool->hash_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	 * Become the manager and destroy all workers.  This prevents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	 * @pool's workers from blocking on wq_pool_attach_mutex.  We're the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	 * manager and @pool gets freed with the flag set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 	 * Because of how wq_manager_inactive() works, we will hold the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 	 * spinlock after a successful wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 	rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 			   TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 	pool->flags |= POOL_MANAGER_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	while ((worker = first_idle_worker(pool)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 		destroy_worker(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	WARN_ON(pool->nr_workers || pool->nr_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 	mutex_lock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 	if (!list_empty(&pool->workers))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 		pool->detach_completion = &detach_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 	mutex_unlock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 	if (pool->detach_completion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 		wait_for_completion(pool->detach_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 	/* shut down the timers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	del_timer_sync(&pool->idle_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	del_timer_sync(&pool->mayday_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	/* RCU protected to allow dereferences from get_work_pool() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	call_rcu(&pool->rcu, rcu_free_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)  * get_unbound_pool - get a worker_pool with the specified attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621)  * @attrs: the attributes of the worker_pool to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623)  * Obtain a worker_pool which has the same attributes as @attrs, bump the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624)  * reference count and return it.  If there already is a matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)  * worker_pool, it will be used; otherwise, this function attempts to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)  * create a new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628)  * Should be called with wq_pool_mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630)  * Return: On success, a worker_pool with the same attributes as @attrs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631)  * On failure, %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 	u32 hash = wqattrs_hash(attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	int node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 	int target_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 	lockdep_assert_held(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	/* do we already have a matching pool? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 		if (wqattrs_equal(pool->attrs, attrs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 			pool->refcnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 			return pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	/* if cpumask is contained inside a NUMA node, we belong to that node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	if (wq_numa_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 		for_each_node(node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 			if (cpumask_subset(attrs->cpumask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 					   wq_numa_possible_cpumask[node])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 				target_node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	/* nope, create a new one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	if (!pool || init_worker_pool(pool) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 	copy_workqueue_attrs(pool->attrs, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	pool->node = target_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	 * no_numa isn't a worker_pool attribute, always clear it.  See
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	 * 'struct workqueue_attrs' comments for detail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 	pool->attrs->no_numa = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 	if (worker_pool_assign_id(pool) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	/* create and start the initial worker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	if (wq_online && !create_worker(pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 	/* install */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 	return pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	if (pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 		put_unbound_pool(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) static void rcu_free_pwq(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 	kmem_cache_free(pwq_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 			container_of(rcu, struct pool_workqueue, rcu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700)  * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701)  * and needs to be destroyed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) static void pwq_unbound_release_workfn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 						  unbound_release_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 	struct workqueue_struct *wq = pwq->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 	struct worker_pool *pool = pwq->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 	bool is_last = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	 * When @pwq is not linked, it doesn't hold any reference to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	 * @wq, and @wq may no longer be valid to access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	 * @wq, and @wq is invalid to access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 	if (!list_empty(&pwq->pwqs_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 		mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 		list_del_rcu(&pwq->pwqs_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 		is_last = list_empty(&wq->pwqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 		mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 	put_unbound_pool(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	call_rcu(&pwq->rcu, rcu_free_pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	 * If we're the last pwq going away, @wq is already dead and no one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 	 * is gonna access it anymore.  Schedule RCU free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 	if (is_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 		wq_unregister_lockdep(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 		call_rcu(&wq->rcu, rcu_free_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742)  * pwq_adjust_max_active - update a pwq's max_active to the current setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743)  * @pwq: target pool_workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)  * If @pwq isn't freezing, set @pwq->max_active to the associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)  * workqueue's saved_max_active and activate delayed work items
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747)  * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) static void pwq_adjust_max_active(struct pool_workqueue *pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 	struct workqueue_struct *wq = pwq->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 	bool freezable = wq->flags & WQ_FREEZABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 	/* for @wq->saved_max_active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 	lockdep_assert_held(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 	/* fast exit for non-freezable wqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 	if (!freezable && pwq->max_active == wq->saved_max_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 	/* this function can be called during early boot w/ irq disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	raw_spin_lock_irqsave(&pwq->pool->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 	 * During [un]freezing, the caller is responsible for ensuring that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 	 * this function is called at least once after @workqueue_freezing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 	 * is updated and visible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 	if (!freezable || !workqueue_freezing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 		bool kick = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 		pwq->max_active = wq->saved_max_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 		while (!list_empty(&pwq->delayed_works) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 		       pwq->nr_active < pwq->max_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 			pwq_activate_first_delayed(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 			kick = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 		 * Need to kick a worker after the wq is thawed or an unbound wq's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 		 * max_active is bumped. In realtime scenarios, always kicking a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		 * worker would cause interference on isolated cpu cores, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 		 * kick only if work items were actually activated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 		if (kick)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 			wake_up_worker(pwq->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 		pwq->max_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 	raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 
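/*
 * Worked example (hypothetical numbers): if a freezable wq is thawed with
 * saved_max_active == 4 while three items sit on pwq->delayed_works and
 * nr_active == 0, the loop above activates all three and wake_up_worker()
 * gets the pool running again.  While freezing, max_active is forced to 0
 * so nothing new becomes active.
 */
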
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) /* initialize newly alloced @pwq which is associated with @wq and @pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 		     struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 	memset(pwq, 0, sizeof(*pwq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	pwq->pool = pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 	pwq->wq = wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 	pwq->flush_color = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 	pwq->refcnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 	INIT_LIST_HEAD(&pwq->delayed_works);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 	INIT_LIST_HEAD(&pwq->pwqs_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 	INIT_LIST_HEAD(&pwq->mayday_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) /* sync @pwq with the current state of its associated wq and link it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) static void link_pwq(struct pool_workqueue *pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 	struct workqueue_struct *wq = pwq->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 	lockdep_assert_held(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 	/* may be called multiple times, ignore if already linked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 	if (!list_empty(&pwq->pwqs_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 	/* set the matching work_color */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 	pwq->work_color = wq->work_color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 	/* sync max_active to the current setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	pwq_adjust_max_active(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 	/* link in @pwq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 					const struct workqueue_attrs *attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 	lockdep_assert_held(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 	pool = get_unbound_pool(attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 	if (!pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 	if (!pwq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 		put_unbound_pool(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 	init_pwq(pwq, wq, pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 	return pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)  * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)  * @attrs: the wq_attrs of the default pwq of the target workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)  * @node: the target NUMA node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862)  * @cpu_going_down: if >= 0, the CPU to consider as offline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863)  * @cpumask: outarg, the resulting cpumask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865)  * Calculate the cpumask a workqueue with @attrs should use on @node.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866)  * @cpu_going_down is >= 0, that cpu is considered offline during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867)  * calculation.  The result is stored in @cpumask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869)  * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)  * enabled and @node has online CPUs requested by @attrs, the returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)  * cpumask is the intersection of the possible CPUs of @node and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872)  * @attrs->cpumask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)  * The caller is responsible for ensuring that the cpumask of @node stays
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)  * stable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877)  * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878)  * %false if equal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 				 int cpu_going_down, cpumask_t *cpumask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 	if (!wq_numa_enabled || attrs->no_numa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 		goto use_dfl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 	/* does @node have any online CPUs @attrs wants? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 	cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 	if (cpu_going_down >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 		cpumask_clear_cpu(cpu_going_down, cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 	if (cpumask_empty(cpumask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 		goto use_dfl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 	/* yeap, return possible CPUs in @node that @attrs wants */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 	cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 	if (cpumask_empty(cpumask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 		pr_warn_once("WARNING: workqueue cpumask: online intersect > possible intersect\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 	return !cpumask_equal(cpumask, attrs->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) use_dfl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 	cpumask_copy(cpumask, attrs->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 
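/*
 * Worked example (hypothetical topology): if node 1 owns CPUs 4-7 and
 * @attrs->cpumask is 0-5 with no CPU going down, the online intersection
 * is 4-5, so @cpumask becomes 4-5 and the function returns %true (a
 * node-local pwq is worthwhile).  If @attrs->cpumask were 0-3 instead,
 * the intersection would be empty, @attrs->cpumask is copied as-is and
 * %false is returned.
 */
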
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 						   int node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 						   struct pool_workqueue *pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 	struct pool_workqueue *old_pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 	lockdep_assert_held(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 	lockdep_assert_held(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 	/* link_pwq() can handle duplicate calls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 	link_pwq(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 	old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 	rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	return old_pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) /* context to store the prepared attrs & pwqs before applying */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) struct apply_wqattrs_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 	struct workqueue_struct	*wq;		/* target workqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 	struct workqueue_attrs	*attrs;		/* attrs to apply */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 	struct list_head	list;		/* queued for batching commit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 	struct pool_workqueue	*dfl_pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 	struct pool_workqueue	*pwq_tbl[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) /* free the resources after success or abort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 	if (ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 		int node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 		for_each_node(node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 			put_pwq_unlocked(ctx->pwq_tbl[node]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 		put_pwq_unlocked(ctx->dfl_pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 		free_workqueue_attrs(ctx->attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 		kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) /* allocate the attrs and pwqs for later installation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) static struct apply_wqattrs_ctx *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) apply_wqattrs_prepare(struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 		      const struct workqueue_attrs *attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	struct apply_wqattrs_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 	struct workqueue_attrs *new_attrs, *tmp_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 	int node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 	lockdep_assert_held(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	new_attrs = alloc_workqueue_attrs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	tmp_attrs = alloc_workqueue_attrs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 	if (!ctx || !new_attrs || !tmp_attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 	 * Calculate the attrs of the default pwq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 	 * If the user-configured cpumask doesn't overlap with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 	 * wq_unbound_cpumask, we fall back to the wq_unbound_cpumask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 	copy_workqueue_attrs(new_attrs, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 	if (unlikely(cpumask_empty(new_attrs->cpumask)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 		cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 	 * We may create multiple pwqs with differing cpumasks.  Make a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 	 * copy of @new_attrs which will be modified and used to obtain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 	 * pools.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 	copy_workqueue_attrs(tmp_attrs, new_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 	 * If something goes wrong during CPU up/down, we'll fall back to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 	 * the default pwq covering whole @attrs->cpumask.  Always create
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 	 * it even if we don't use it immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 	ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 	if (!ctx->dfl_pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 	for_each_node(node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 		if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 			ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 			if (!ctx->pwq_tbl[node])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 				goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 			ctx->dfl_pwq->refcnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 			ctx->pwq_tbl[node] = ctx->dfl_pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 	/* save the user-configured attrs and sanitize them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 	copy_workqueue_attrs(new_attrs, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 	ctx->attrs = new_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 	ctx->wq = wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 	free_workqueue_attrs(tmp_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 	return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 	free_workqueue_attrs(tmp_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 	free_workqueue_attrs(new_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 	apply_wqattrs_cleanup(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 	int node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 	/* all pwqs have been created successfully, let's install'em */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 	mutex_lock(&ctx->wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 	/* save the previous pwq and install the new one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 	for_each_node(node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 		ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 							  ctx->pwq_tbl[node]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 	/* @dfl_pwq might not have been used, ensure it's linked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 	link_pwq(ctx->dfl_pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 	mutex_unlock(&ctx->wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) static void apply_wqattrs_lock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 	/* CPUs should stay stable across pwq creations and installations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 	get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) static void apply_wqattrs_unlock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 					const struct workqueue_attrs *attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 	struct apply_wqattrs_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 	/* only unbound workqueues can change attributes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 	/* creating multiple pwqs breaks ordering guarantee */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 	if (!list_empty(&wq->pwqs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 		wq->flags &= ~__WQ_ORDERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 	ctx = apply_wqattrs_prepare(wq, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 	/* the ctx has been prepared successfully, let's commit it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 	apply_wqattrs_commit(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 	apply_wqattrs_cleanup(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088)  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089)  * @wq: the target workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090)  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092)  * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093)  * machines, this function maps a separate pwq to each NUMA node with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094)  * possible CPUs in @attrs->cpumask so that work items are affine to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095)  * NUMA node they were issued on.  Older pwqs are released as in-flight work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096)  * items finish.  Note that a work item which repeatedly requeues itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097)  * back-to-back will stay on its current pwq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099)  * Performs GFP_KERNEL allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)  * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103)  * Return: 0 on success and -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) int apply_workqueue_attrs(struct workqueue_struct *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 			  const struct workqueue_attrs *attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 	lockdep_assert_cpus_held();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 	ret = apply_workqueue_attrs_locked(wq, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 
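/*
 * Usage sketch (hypothetical caller, names made up): pin an unbound
 * workqueue to CPUs 0-3 at a boosted nice level.  Assumes a sleepable
 * context; error handling is abbreviated.
 */
static int example_pin_unbound_wq(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;
	int cpu, ret;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -10;
	cpumask_clear(attrs->cpumask);
	for (cpu = 0; cpu < 4 && cpu < nr_cpu_ids; cpu++)
		cpumask_set_cpu(cpu, attrs->cpumask);

	get_online_cpus();		/* CPU hotplug read exclusion */
	ret = apply_workqueue_attrs(wq, attrs);
	put_online_cpus();

	free_workqueue_attrs(attrs);
	return ret;
}
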
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120)  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121)  * @wq: the target workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122)  * @cpu: the CPU coming up or going down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123)  * @online: whether @cpu is coming up or going down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125)  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126)  * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127)  * @wq accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129)  * If NUMA affinity can't be adjusted due to memory allocation failure, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130)  * falls back to @wq->dfl_pwq which may not be optimal but is always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131)  * correct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133)  * Note that when the last allowed CPU of a NUMA node goes offline for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134)  * workqueue with a cpumask spanning multiple nodes, the workers which were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135)  * already executing the work items for the workqueue will lose their CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)  * affinity and may execute on any CPU.  This is similar to how per-cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137)  * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138)  * affinity, it's the user's responsibility to flush the work item from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139)  * CPU_DOWN_PREPARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 				   bool online)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	int node = cpu_to_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 	int cpu_off = online ? -1 : cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	struct pool_workqueue *old_pwq = NULL, *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 	struct workqueue_attrs *target_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 	cpumask_t *cpumask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 	lockdep_assert_held(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 	if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	    wq->unbound_attrs->no_numa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 	 * Let's use a preallocated one.  The following buf is protected by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 	 * CPU hotplug exclusion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	target_attrs = wq_update_unbound_numa_attrs_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 	cpumask = target_attrs->cpumask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 	pwq = unbound_pwq_by_node(wq, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 	 * Let's determine what needs to be done.  If the target cpumask is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 	 * different from the default pwq's, we need to compare it to @pwq's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	 * and create a new one if they don't match.  If the target cpumask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 	 * equals the default pwq's, the default pwq should be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 	if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 		if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 		goto use_dfl_pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 	/* create a new pwq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 	pwq = alloc_unbound_pwq(wq, target_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 	if (!pwq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 		pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 			wq->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 		goto use_dfl_pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 	/* Install the new pwq. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 	old_pwq = numa_pwq_tbl_install(wq, node, pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 	goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) use_dfl_pwq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 	raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 	get_pwq(wq->dfl_pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 	raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 	old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 	put_pwq_unlocked(old_pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) static int alloc_and_link_pwqs(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 	bool highpri = wq->flags & WQ_HIGHPRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 	int cpu, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 	if (!(wq->flags & WQ_UNBOUND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 		if (!wq->cpu_pwqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 		for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 			struct pool_workqueue *pwq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 				per_cpu_ptr(wq->cpu_pwqs, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 			struct worker_pool *cpu_pools =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 				per_cpu(cpu_worker_pools, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 			init_pwq(pwq, wq, &cpu_pools[highpri]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 			mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 			link_pwq(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 			mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 	get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 	if (wq->flags & __WQ_ORDERED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 		/* there should only be a single pwq for the ordering guarantee */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 		     "ordering guarantee broken for workqueue %s\n", wq->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 	put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) static int wq_clamp_max_active(int max_active, unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 			       const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 	if (max_active < 1 || max_active > lim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) 		pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 			max_active, name, 1, lim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 	return clamp_val(max_active, 1, lim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 
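/*
 * Worked example (hypothetical request): asking for max_active == 1024 on
 * a per-cpu workqueue warns and clamps to WQ_MAX_ACTIVE (512); a request
 * below 1 warns and clamps to 1.
 */
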
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257)  * Workqueues which may be used during memory reclaim should have a rescuer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258)  * to guarantee forward progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) static int init_rescuer(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 	struct worker *rescuer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 	if (!(wq->flags & WQ_MEM_RECLAIM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 	rescuer = alloc_worker(NUMA_NO_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 	if (!rescuer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 	rescuer->rescue_wq = wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 	rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 	if (IS_ERR(rescuer->task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 		ret = PTR_ERR(rescuer->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 		kfree(rescuer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 	wq->rescuer = rescuer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 	kthread_bind_mask(rescuer->task, cpu_possible_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 	wake_up_process(rescuer->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 
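/*
 * Usage sketch (hypothetical driver, names made up): a workqueue whose
 * work items may be flushed on the memory-reclaim path passes
 * WQ_MEM_RECLAIM so that init_rescuer() attaches a rescuer thread which
 * can make forward progress even when no new workers can be created.
 */
static struct workqueue_struct *example_reclaim_wq;

static int example_create_reclaim_wq(void)
{
	example_reclaim_wq = alloc_workqueue("example_reclaim",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	return example_reclaim_wq ? 0 : -ENOMEM;
}
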
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) __printf(1, 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) struct workqueue_struct *alloc_workqueue(const char *fmt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 					 unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 					 int max_active, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 	size_t tbl_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 	va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 	 * Unbound && max_active == 1 used to imply ordered, which is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	 * longer the case on NUMA machines due to per-node pools.  While
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 	 * alloc_ordered_workqueue() is the right way to create an ordered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 	 * workqueue, keep the previous behavior to avoid subtle breakages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 	 * on NUMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 	if ((flags & WQ_UNBOUND) && max_active == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 		flags |= __WQ_ORDERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 		flags |= WQ_UNBOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 	/* allocate wq and format name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 	if (flags & WQ_UNBOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 		tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 	wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 	if (!wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 	if (flags & WQ_UNBOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 		wq->unbound_attrs = alloc_workqueue_attrs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 		if (!wq->unbound_attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 			goto err_free_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 	va_start(args, max_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 	va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 	max_active = max_active ?: WQ_DFL_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 	max_active = wq_clamp_max_active(max_active, flags, wq->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 	/* init wq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 	wq->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 	wq->saved_max_active = max_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 	mutex_init(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 	atomic_set(&wq->nr_pwqs_to_flush, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 	INIT_LIST_HEAD(&wq->pwqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 	INIT_LIST_HEAD(&wq->flusher_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 	INIT_LIST_HEAD(&wq->flusher_overflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 	INIT_LIST_HEAD(&wq->maydays);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 	wq_init_lockdep(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	INIT_LIST_HEAD(&wq->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 	if (alloc_and_link_pwqs(wq) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 		goto err_unreg_lockdep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 	if (wq_online && init_rescuer(wq) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 		goto err_destroy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 	if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 		goto err_destroy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 	 * wq_pool_mutex protects global freeze state and workqueues list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 	 * Grab it, adjust max_active and add the new @wq to workqueues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 	 * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 	for_each_pwq(pwq, wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 		pwq_adjust_max_active(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 	list_add_tail_rcu(&wq->list, &workqueues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) 	return wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) err_unreg_lockdep:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 	wq_unregister_lockdep(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 	wq_free_lockdep(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) err_free_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 	free_workqueue_attrs(wq->unbound_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 	kfree(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) err_destroy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 	destroy_workqueue(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) EXPORT_SYMBOL_GPL(alloc_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 
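/*
 * Usage sketch (hypothetical, names made up): as the comment in
 * alloc_workqueue() notes, WQ_UNBOUND with max_active == 1 is still
 * treated as ordered for compatibility, but alloc_ordered_workqueue()
 * is the explicit way to request strict one-at-a-time execution.
 */
static struct workqueue_struct *example_ordered_wq;

static int example_create_ordered_wq(void)
{
	example_ordered_wq = alloc_ordered_workqueue("example_ordered", 0);
	return example_ordered_wq ? 0 : -ENOMEM;
}
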
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) static bool pwq_busy(struct pool_workqueue *pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 	for (i = 0; i < WORK_NR_COLORS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 		if (pwq->nr_in_flight[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 	if (pwq->nr_active || !list_empty(&pwq->delayed_works))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402)  * destroy_workqueue - safely terminate a workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403)  * @wq: target workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405)  * Safely destroy a workqueue. All work currently pending will be done first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) void destroy_workqueue(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 	int node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 	 * Remove it from sysfs first so that sanity check failure doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 	 * lead to sysfs name conflicts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 	workqueue_sysfs_unregister(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 	/* drain it before proceeding with destruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 	drain_workqueue(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 	/* kill the rescuer; if the sanity checks below fail, @wq is left w/o one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 	if (wq->rescuer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 		struct worker *rescuer = wq->rescuer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 		/* this prevents new queueing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 		raw_spin_lock_irq(&wq_mayday_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 		wq->rescuer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) 		raw_spin_unlock_irq(&wq_mayday_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 		/* rescuer will empty maydays list before exiting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 		kthread_stop(rescuer->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 		kfree(rescuer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 	 * Sanity checks - grab all the locks so that we wait for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 	 * in-flight operations which may do put_pwq().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 	for_each_pwq(pwq, wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 		raw_spin_lock_irq(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 		if (WARN_ON(pwq_busy(pwq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 			pr_warn("%s: %s has the following busy pwq\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 				__func__, wq->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 			show_pwq(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 			raw_spin_unlock_irq(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 			mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 			mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 			show_workqueue_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 		raw_spin_unlock_irq(&pwq->pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) 	 * The wq list is used to freeze wqs.  Remove @wq from the list only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) 	 * after flushing is complete in case a freeze races us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 	list_del_rcu(&wq->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) 	if (!(wq->flags & WQ_UNBOUND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 		wq_unregister_lockdep(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 		 * The base ref is never dropped on per-cpu pwqs.  Directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 		 * schedule RCU free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 		call_rcu(&wq->rcu, rcu_free_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 		 * We're the sole accessor of @wq at this point.  Directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 		 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 		 * @wq will be freed when the last pwq is released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 		for_each_node(node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 			pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 			RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 			put_pwq_unlocked(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 		 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) 		 * put.  Don't access it afterwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) 		pwq = wq->dfl_pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 		wq->dfl_pwq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) 		put_pwq_unlocked(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) EXPORT_SYMBOL_GPL(destroy_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 
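/*
 * Lifecycle sketch (hypothetical driver, names made up): a caller that
 * allocated its own workqueue destroys it on teardown; pending work is
 * drained before the pwqs are released.
 */
static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	/* deferred processing would go here */
}
static DECLARE_WORK(example_work, example_work_fn);

static int example_start(void)
{
	example_wq = alloc_workqueue("example", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);
	return 0;
}

static void example_stop(void)
{
	destroy_workqueue(example_wq);	/* drains, then frees the pwqs */
}
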
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495)  * workqueue_set_max_active - adjust max_active of a workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496)  * @wq: target workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497)  * @max_active: new max_active value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499)  * Set max_active of @wq to @max_active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502)  * Don't call from IRQ context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 	/* disallow meddling with max_active for ordered workqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) 	wq->flags &= ~__WQ_ORDERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 	wq->saved_max_active = max_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) 	for_each_pwq(pwq, wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) 		pwq_adjust_max_active(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) EXPORT_SYMBOL_GPL(workqueue_set_max_active);
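
/*
 * A minimal usage sketch, assuming a caller-owned workqueue @my_wq; the
 * limit of 16 and my_raise_wq_concurrency() are hypothetical.
 */
static void my_raise_wq_concurrency(struct workqueue_struct *my_wq)
{
	/* allow up to 16 in-flight work items per pool_workqueue */
	workqueue_set_max_active(my_wq, 16);
}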
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527)  * current_work - retrieve %current task's work struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529)  * Determine whether %current is a workqueue worker and what it's working on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530)  * Useful to find out the context that the %current task is running in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532)  * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) struct work_struct *current_work(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 	struct worker *worker = current_wq_worker();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 	return worker ? worker->current_work : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) EXPORT_SYMBOL(current_work);
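
/*
 * A minimal sketch of using current_work() to avoid a self-flush deadlock:
 * skip flushing if we are already running inside @my_work.  @my_work and
 * my_sync_work() are hypothetical.
 */
static void my_sync_work(struct work_struct *my_work)
{
	if (current_work() == my_work)
		return;		/* called from within the work item itself */

	flush_work(my_work);
}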
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543)  * current_is_workqueue_rescuer - is %current workqueue rescuer?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545)  * Determine whether %current is a workqueue rescuer.  Can be used from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546)  * work functions to determine whether it's being run off the rescuer task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548)  * Return: %true if %current is a workqueue rescuer. %false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) bool current_is_workqueue_rescuer(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 	struct worker *worker = current_wq_worker();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 	return worker && worker->rescue_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) }
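
/*
 * A minimal sketch: a work function that avoids sleeping allocations when
 * run from the rescuer, so the rescuer itself isn't stalled under memory
 * pressure.  my_rescuer_aware_fn() is hypothetical.
 */
static void my_rescuer_aware_fn(struct work_struct *work)
{
	gfp_t gfp = current_is_workqueue_rescuer() ? GFP_NOWAIT : GFP_KERNEL;
	void *buf = kmalloc(64, gfp);

	if (!buf)
		return;
	/* ... use buf ... */
	kfree(buf);
}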
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558)  * workqueue_congested - test whether a workqueue is congested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559)  * @cpu: CPU in question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560)  * @wq: target workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562)  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563)  * no synchronization around this function and the test result is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564)  * unreliable and only useful as advisory hints or for debugging.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566)  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567)  * Note that both per-cpu and unbound workqueues may be associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568)  * multiple pool_workqueues which have separate congested states.  A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569)  * workqueue being congested on one CPU doesn't mean the workqueue is also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570)  * congested on other CPUs / NUMA nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573)  * %true if congested, %false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) bool workqueue_congested(int cpu, struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 	bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 	if (cpu == WORK_CPU_UNBOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 		cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 	if (!(wq->flags & WQ_UNBOUND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 	ret = !list_empty(&pwq->delayed_works);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) EXPORT_SYMBOL_GPL(workqueue_congested);
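
/*
 * A minimal sketch of treating the result as an advisory hint: shed
 * optional work when the local pwq looks congested.  @my_wq and
 * @my_optional_work are hypothetical.
 */
static bool my_queue_optional_work(struct workqueue_struct *my_wq,
				   struct work_struct *my_optional_work)
{
	if (workqueue_congested(WORK_CPU_UNBOUND, my_wq))
		return false;	/* drop or defer the optional work */

	return queue_work(my_wq, my_optional_work);
}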
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600)  * work_busy - test whether a work is currently pending or running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601)  * @work: the work to be tested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603)  * Test whether @work is currently pending or running.  There is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604)  * synchronization around this function and the test result is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605)  * unreliable and only useful as advisory hints or for debugging.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608)  * OR'd bitmask of WORK_BUSY_* bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) unsigned int work_busy(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 	unsigned int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 	if (work_pending(work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 		ret |= WORK_BUSY_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 	pool = get_work_pool(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 	if (pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) 		raw_spin_lock_irqsave(&pool->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 		if (find_worker_executing_work(pool, work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 			ret |= WORK_BUSY_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) 		raw_spin_unlock_irqrestore(&pool->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) EXPORT_SYMBOL_GPL(work_busy);
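
/*
 * A minimal sketch: dump a work item's advisory state for debugging.
 * @my_work and my_report_work_state() are hypothetical.
 */
static void my_report_work_state(struct work_struct *my_work)
{
	unsigned int busy = work_busy(my_work);

	pr_info("my_work:%s%s\n",
		busy & WORK_BUSY_PENDING ? " pending" : "",
		busy & WORK_BUSY_RUNNING ? " running" : "");
}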
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634)  * set_worker_desc - set description for the current work item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635)  * @fmt: printf-style format string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636)  * @...: arguments for the format string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638)  * This function can be called by a running work function to describe what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639)  * the work item is about.  If the worker task gets dumped, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640)  * information will be printed out with the task dump to help debugging.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641)  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) void set_worker_desc(const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) 	struct worker *worker = current_wq_worker();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) 	va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) 	if (worker) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) 		va_start(args, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) 		vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) 		va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) EXPORT_SYMBOL_GPL(set_worker_desc);
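
/*
 * A minimal sketch: a work function tagging its worker with the request it
 * is processing so the tag shows up in task dumps (see print_worker_info()
 * below).  struct my_req and my_req_work_fn() are hypothetical.
 */
struct my_req {
	struct work_struct work;
	unsigned int id;
};

static void my_req_work_fn(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	set_worker_desc("my_req %u", req->id);
	/* ... process the request ... */
}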
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657)  * print_worker_info - print out worker information and description
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658)  * @log_lvl: the log level to use when printing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659)  * @task: target task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661)  * If @task is a worker and currently executing a work item, print out the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662)  * name of the workqueue being serviced and worker description set with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663)  * set_worker_desc() by the currently executing work item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665)  * This function can be safely called on any task as long as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666)  * task_struct itself is accessible.  While safe, this function isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667)  * synchronized and may print out mixed-up or garbage strings of limited length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) void print_worker_info(const char *log_lvl, struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) 	work_func_t *fn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 	char name[WQ_NAME_LEN] = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) 	char desc[WORKER_DESC_LEN] = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) 	struct pool_workqueue *pwq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) 	struct workqueue_struct *wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) 	struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) 	if (!(task->flags & PF_WQ_WORKER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) 	 * This function is called without any synchronization and @task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) 	 * could be in any state.  Be careful with dereferences.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 	worker = kthread_probe_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 	 * Carefully copy the associated workqueue's workfn, name and desc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 	 * Keep the original last '\0' in case the original is garbage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 	copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 	copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 	copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 	copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 	copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) 	if (fn || name[0] || desc[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 		printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) 		if (strcmp(name, desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) 			pr_cont(" (%s)", desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) 		pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) }
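
/*
 * A minimal sketch: include the worker information when reporting a task
 * that appears stuck.  my_report_stuck_task() is hypothetical.
 */
static void my_report_stuck_task(struct task_struct *task)
{
	pr_info("task %s:%d appears stuck\n", task->comm, task_pid_nr(task));
	print_worker_info(KERN_INFO, task);
}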
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) static void pr_cont_pool_info(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) 	pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) 	if (pool->node != NUMA_NO_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) 		pr_cont(" node=%d", pool->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 	pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) static void pr_cont_work(bool comma, struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) 	if (work->func == wq_barrier_func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 		struct wq_barrier *barr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) 		barr = container_of(work, struct wq_barrier, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) 		pr_cont("%s BAR(%d)", comma ? "," : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) 			task_pid_nr(barr->task));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) 		pr_cont("%s %ps", comma ? "," : "", work->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) static void show_pwq(struct pool_workqueue *pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) 	struct worker_pool *pool = pwq->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) 	struct work_struct *work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) 	struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) 	bool has_in_flight = false, has_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) 	int bkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) 	pr_info("  pwq %d:", pool->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) 	pr_cont_pool_info(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) 	pr_cont(" active=%d/%d refcnt=%d%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) 		pwq->nr_active, pwq->max_active, pwq->refcnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) 		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) 		if (worker->current_pwq == pwq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) 			has_in_flight = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) 	if (has_in_flight) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) 		bool comma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) 		pr_info("    in-flight:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) 		hash_for_each(pool->busy_hash, bkt, worker, hentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) 			if (worker->current_pwq != pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) 			pr_cont("%s %d%s:%ps", comma ? "," : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) 				task_pid_nr(worker->task),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) 				worker->rescue_wq ? "(RESCUER)" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) 				worker->current_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) 			list_for_each_entry(work, &worker->scheduled, entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) 				pr_cont_work(false, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) 			comma = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) 		pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) 	list_for_each_entry(work, &pool->worklist, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) 		if (get_work_pwq(work) == pwq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) 			has_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) 	if (has_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) 		bool comma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) 		pr_info("    pending:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) 		list_for_each_entry(work, &pool->worklist, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) 			if (get_work_pwq(work) != pwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) 			pr_cont_work(comma, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) 		pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) 	if (!list_empty(&pwq->delayed_works)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) 		bool comma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) 		pr_info("    delayed:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) 		list_for_each_entry(work, &pwq->delayed_works, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) 			pr_cont_work(comma, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) 		pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800)  * show_workqueue_state - dump workqueue state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802)  * Called from a sysrq handler or try_to_freeze_tasks() and prints out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803)  * all busy workqueues and pools.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) void show_workqueue_state(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) 	int pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) 	pr_info("Showing busy workqueues and worker pools:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) 	list_for_each_entry_rcu(wq, &workqueues, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) 		struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) 		bool idle = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) 		for_each_pwq(pwq, wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) 			if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) 				idle = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) 		if (idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) 		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) 		for_each_pwq(pwq, wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) 			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) 			if (pwq->nr_active || !list_empty(&pwq->delayed_works))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) 				show_pwq(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) 			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) 			 * We could be printing a lot from atomic context, e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) 			 * sysrq-t -> show_workqueue_state(). Avoid triggering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) 			 * hard lockup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) 			touch_nmi_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) 	for_each_pool(pool, pi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) 		struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) 		bool first = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) 		raw_spin_lock_irqsave(&pool->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) 		if (pool->nr_workers == pool->nr_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) 			goto next_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) 		pr_info("pool %d:", pool->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) 		pr_cont_pool_info(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) 		pr_cont(" hung=%us workers=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) 			jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) 			pool->nr_workers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) 		if (pool->manager)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) 			pr_cont(" manager: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) 				task_pid_nr(pool->manager->task));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) 		list_for_each_entry(worker, &pool->idle_list, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) 			pr_cont(" %s%d", first ? "idle: " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) 				task_pid_nr(worker->task));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) 			first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) 		pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) 	next_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) 		raw_spin_unlock_irqrestore(&pool->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) 		 * We could be printing a lot from atomic context, e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) 		 * sysrq-t -> show_workqueue_state(). Avoid triggering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) 		 * hard lockup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) 		touch_nmi_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) }
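
/*
 * A minimal sketch: dump workqueue state from a driver's own watchdog or
 * error path when work items appear to have stopped making progress.
 * my_wq_debug_dump() is hypothetical.
 */
static void my_wq_debug_dump(void)
{
	pr_warn("my driver: work items appear stalled\n");
	show_workqueue_state();
}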
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) /* used to show worker information through /proc/PID/{comm,stat,status} */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) 	int off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) 	/* always show the actual comm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) 	off = strscpy(buf, task->comm, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) 	if (off < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) 	/* stabilize PF_WQ_WORKER and worker pool association */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) 	mutex_lock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) 	if (task->flags & PF_WQ_WORKER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) 		struct worker *worker = kthread_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) 		struct worker_pool *pool = worker->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) 		if (pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) 			raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) 			 * ->desc tracks information (wq name or set_worker_desc())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) 			 * for the latest execution.  If that execution is still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) 			 * current, prefix it with '+', otherwise with '-'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) 			if (worker->desc[0] != '\0') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) 				if (worker->current_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) 					scnprintf(buf + off, size - off, "+%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) 						  worker->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) 					scnprintf(buf + off, size - off, "-%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) 						  worker->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) 			raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) 	mutex_unlock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) EXPORT_SYMBOL_GPL(wq_worker_comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923)  * CPU hotplug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925)  * There are two challenges in supporting CPU hotplug.  Firstly, there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926)  * are a lot of assumptions on strong associations among work, pwq and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927)  * pool which make migrating pending and scheduled works very
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928)  * difficult to implement without impacting hot paths.  Secondly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929)  * worker pools serve a mix of short, long and very long running works, making
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930)  * blocked draining impractical.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932)  * This is solved by allowing a pool to be disassociated from its CPU and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933)  * run as an unbound pool, and then reattached later when the CPU comes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934)  * back online.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) static void unbind_workers(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) 	struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) 	for_each_cpu_worker_pool(pool, cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) 		mutex_lock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) 		raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) 		 * We've blocked all attach/detach operations. Make all workers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) 		 * unbound and set DISASSOCIATED.  Before this, all workers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) 		 * except for the ones still executing works from before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) 		 * last CPU-down, must be on the CPU.  After this, they may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) 		 * be migrated to and run on any CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) 		for_each_pool_worker(worker, pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) 			worker->flags |= WORKER_UNBOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) 		pool->flags |= POOL_DISASSOCIATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) 		raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) 		mutex_unlock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) 		 * Call schedule() so that we cross rq->lock and thus can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) 		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) 		 * This is necessary as scheduler callbacks may be invoked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) 		 * from other cpus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) 		schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) 		 * Sched callbacks are disabled now.  Zap nr_running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) 		 * After this, nr_running stays zero and need_more_worker()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) 		 * and keep_working() are always true as long as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) 		 * worklist is not empty.  This pool now behaves as an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) 		 * unbound (in terms of concurrency management) pool which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) 		 * is served by workers tied to the pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) 		atomic_set(&pool->nr_running, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) 		 * With concurrency management just turned off, a busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) 		 * worker blocking could lead to lengthy stalls.  Kick off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) 		 * unbound chain execution of currently pending work items.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) 		raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) 		wake_up_worker(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) 		raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991)  * rebind_workers - rebind all workers of a pool to the associated CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992)  * @pool: pool of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994)  * @pool->cpu is coming online.  Rebind all workers to the CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) static void rebind_workers(struct worker_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) 	struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) 	lockdep_assert_held(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) 	 * Restore CPU affinity of all workers.  As all idle workers should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) 	 * be on the run-queue of the associated CPU before any local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) 	 * wake-ups for concurrency management happen, restore CPU affinity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) 	 * of all workers first and then clear UNBOUND.  As we're called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) 	 * from CPU_ONLINE, the following shouldn't fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) 	for_each_pool_worker(worker, pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) 						  pool->attrs->cpumask) < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) 	raw_spin_lock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) 	pool->flags &= ~POOL_DISASSOCIATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) 	for_each_pool_worker(worker, pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) 		unsigned int worker_flags = worker->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) 		 * A bound idle worker should actually be on the runqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) 		 * of the associated CPU for local wake-ups targeting it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) 		 * work.  Kick all idle workers so that they migrate to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) 		 * associated CPU.  Doing this in the same loop as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) 		 * replacing UNBOUND with REBOUND is safe as no worker will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) 		 * be bound before @pool->lock is released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) 		if (worker_flags & WORKER_IDLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) 			wake_up_process(worker->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) 		 * We want to clear UNBOUND but can't directly call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) 		 * worker_clr_flags() or adjust nr_running.  Atomically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) 		 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) 		 * @worker will clear REBOUND using worker_clr_flags() when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) 		 * it initiates the next execution cycle thus restoring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) 		 * concurrency management.  Note that when or whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) 		 * @worker clears REBOUND doesn't affect correctness.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) 		 * WRITE_ONCE() is necessary because @worker->flags may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) 		 * tested without holding any lock in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) 		 * wq_worker_running().  Without it, NOT_RUNNING test may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) 		 * fail incorrectly leading to premature concurrency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) 		 * management operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) 		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) 		worker_flags |= WORKER_REBOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) 		worker_flags &= ~WORKER_UNBOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) 		WRITE_ONCE(worker->flags, worker_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) 	raw_spin_unlock_irq(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056)  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057)  * @pool: unbound pool of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058)  * @cpu: the CPU which is coming up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060)  * An unbound pool may end up with a cpumask which doesn't have any online
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061)  * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062)  * its cpus_allowed.  If @cpu is in @pool's cpumask, which didn't have any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063)  * online CPU before, the cpus_allowed of all its workers should be restored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) 	static cpumask_t cpumask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) 	struct worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) 	lockdep_assert_held(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) 	/* is @cpu allowed for @pool? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) 	cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) 	/* as we're called from CPU_ONLINE, the following shouldn't fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) 	for_each_pool_worker(worker, pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) int workqueue_prepare_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) 	for_each_cpu_worker_pool(pool, cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) 		if (pool->nr_workers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) 		if (!create_worker(pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) int workqueue_online_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) 	int pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) 	for_each_pool(pool, pi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) 		mutex_lock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) 		if (pool->cpu == cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) 			rebind_workers(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) 		else if (pool->cpu < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) 			restore_unbound_workers_cpumask(pool, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) 		mutex_unlock(&wq_pool_attach_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) 	/* update NUMA affinity of unbound workqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) 	list_for_each_entry(wq, &workqueues, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) 		wq_update_unbound_numa(wq, cpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) int workqueue_offline_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) 	/* unbinding per-cpu workers should happen on the local CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) 	if (WARN_ON(cpu != smp_processor_id()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) 	unbind_workers(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) 	/* update NUMA affinity of unbound workqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) 	list_for_each_entry(wq, &workqueues, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) 		wq_update_unbound_numa(wq, cpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) struct work_for_cpu {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) 	struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) 	long (*fn)(void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) 	void *arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) 	long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) static void work_for_cpu_fn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) 	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) 	wfc->ret = wfc->fn(wfc->arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157)  * work_on_cpu - run a function in thread context on a particular cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158)  * @cpu: the cpu to run on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159)  * @fn: the function to run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160)  * @arg: the function arg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162)  * It is up to the caller to ensure that the cpu doesn't go offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163)  * The caller must not hold any locks which would prevent @fn from completing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165)  * Return: The value @fn returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) 	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) 	schedule_work_on(cpu, &wfc.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) 	flush_work(&wfc.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) 	destroy_work_on_stack(&wfc.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) 	return wfc.ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) EXPORT_SYMBOL_GPL(work_on_cpu);
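
/*
 * A minimal sketch: run a setup function on a specific CPU and collect its
 * return value.  my_setup_fn() and my_setup_on_cpu() are hypothetical; the
 * caller is assumed to keep @cpu online, e.g. via get_online_cpus().
 */
static long my_setup_fn(void *arg)
{
	int *setting = arg;

	/* executes in process context on the CPU passed to work_on_cpu() */
	return *setting * 2;
}

static long my_setup_on_cpu(int cpu)
{
	int setting = 21;

	return work_on_cpu(cpu, my_setup_fn, &setting);
}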
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180)  * work_on_cpu_safe - run a function in thread context on a particular cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181)  * @cpu: the cpu to run on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182)  * @fn:  the function to run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183)  * @arg: the function argument
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185)  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186)  * any locks which would prevent @fn from completing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188)  * Return: The value @fn returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) 	long ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) 	get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) 	if (cpu_online(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) 		ret = work_on_cpu(cpu, fn, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) 	put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) EXPORT_SYMBOL_GPL(work_on_cpu_safe);
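
/*
 * A minimal sketch of the hotplug-safe variant: -ENODEV is returned if
 * @cpu is offline.  my_probe_fn() and my_probe_cpu() are hypothetical.
 */
static long my_probe_fn(void *arg)
{
	/* runs in a workqueue worker bound to the requested CPU */
	return raw_smp_processor_id();
}

static long my_probe_cpu(int cpu)
{
	return work_on_cpu_safe(cpu, my_probe_fn, NULL);
}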
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) #ifdef CONFIG_FREEZER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206)  * freeze_workqueues_begin - begin freezing workqueues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208)  * Start freezing workqueues.  After this function returns, all freezable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209)  * workqueues will queue new works to their delayed_works list instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210)  * pool->worklist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213)  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) void freeze_workqueues_begin(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) 	WARN_ON_ONCE(workqueue_freezing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) 	workqueue_freezing = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) 	list_for_each_entry(wq, &workqueues, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) 		mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) 		for_each_pwq(pwq, wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) 			pwq_adjust_max_active(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) 		mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236)  * freeze_workqueues_busy - are freezable workqueues still busy?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238)  * Check whether freezing is complete.  This function must be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239)  * between freeze_workqueues_begin() and thaw_workqueues().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242)  * Grabs and releases wq_pool_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245)  * %true if some freezable workqueues are still busy.  %false if freezing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246)  * is complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) bool freeze_workqueues_busy(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) 	bool busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) 	WARN_ON_ONCE(!workqueue_freezing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) 	list_for_each_entry(wq, &workqueues, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) 		if (!(wq->flags & WQ_FREEZABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) 		 * nr_active is monotonically decreasing.  It's safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) 		 * to peek without lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) 		for_each_pwq(pwq, wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) 			WARN_ON_ONCE(pwq->nr_active < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) 			if (pwq->nr_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) 				busy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) 				goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) 	return busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282)  * thaw_workqueues - thaw workqueues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284)  * Thaw workqueues.  Normal queueing is restored and all collected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285)  * frozen works are transferred to their respective pool worklists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288)  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) void thaw_workqueues(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) 	struct pool_workqueue *pwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) 	if (!workqueue_freezing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) 	workqueue_freezing = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) 	/* restore max_active and repopulate worklist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) 	list_for_each_entry(wq, &workqueues, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) 		mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) 		for_each_pwq(pwq, wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) 			pwq_adjust_max_active(pwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) 		mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) #endif /* CONFIG_FREEZER */
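/*
 * A minimal usage sketch of the freeze protocol documented above, loosely
 * modelled on how the system freezer drives these entry points (the 10ms
 * polling interval and the timeout handling are illustrative only):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy()) {
 *		if (time_after(jiffies, timeout)) {
 *			thaw_workqueues();	// give up and unfreeze
 *			return -EBUSY;
 *		}
 *		msleep(10);
 *	}
 *	// ... workqueues are frozen, do the suspend/hibernate work ...
 *	thaw_workqueues();
 */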
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) static int workqueue_apply_unbound_cpumask(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) 	LIST_HEAD(ctxs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) 	struct apply_wqattrs_ctx *ctx, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) 	lockdep_assert_held(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) 	list_for_each_entry(wq, &workqueues, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) 		if (!(wq->flags & WQ_UNBOUND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) 		/* creating multiple pwqs breaks the ordering guarantee */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) 		if (wq->flags & __WQ_ORDERED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) 		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) 		if (!ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) 		list_add_tail(&ctx->list, &ctxs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) 	list_for_each_entry_safe(ctx, n, &ctxs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) 			apply_wqattrs_commit(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) 		apply_wqattrs_cleanup(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350)  *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351)  *  @cpumask: the cpumask to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353)  *  The low-level workqueues cpumask is a global cpumask that limits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354)  *  the affinity of all unbound workqueues.  This function checks @cpumask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355)  *  applies it to all unbound workqueues and updates all of their pwqs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357)  *  Return:	0	- Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358)  *  		-EINVAL	- Invalid @cpumask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359)  *  		-ENOMEM	- Failed to allocate memory for attrs or pwqs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) 	cpumask_var_t saved_cpumask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) 	 * Not excluding isolated cpus on purpose.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) 	 * If the user wishes to include them, we allow that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) 	if (!cpumask_empty(cpumask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) 		apply_wqattrs_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) 		if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) 		if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) 		/* save the old wq_unbound_cpumask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) 		cpumask_copy(saved_cpumask, wq_unbound_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) 		/* update wq_unbound_cpumask at first and apply it to wqs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) 		cpumask_copy(wq_unbound_cpumask, cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) 		ret = workqueue_apply_unbound_cpumask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) 		/* restore the wq_unbound_cpumask when failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) 			cpumask_copy(wq_unbound_cpumask, saved_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) 		free_cpumask_var(saved_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) 		apply_wqattrs_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) }
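/*
 * An illustrative call sequence: restrict all unbound workqueues to
 * CPUs 0-3.  The sysfs cpumask store helper further below does
 * essentially this with a mask parsed from userspace.
 *
 *	cpumask_var_t mask;
 *	int ret = -ENOMEM;
 *
 *	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(0, mask);
 *		cpumask_set_cpu(1, mask);
 *		cpumask_set_cpu(2, mask);
 *		cpumask_set_cpu(3, mask);
 *		ret = workqueue_set_unbound_cpumask(mask);
 *		free_cpumask_var(mask);
 *	}
 */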
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) #ifdef CONFIG_SYSFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404)  * Workqueues with the WQ_SYSFS flag set are visible to userland via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405)  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406)  * following attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408)  *  per_cpu	RO bool	: whether the workqueue is per-cpu or unbound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409)  *  max_active	RW int	: maximum number of in-flight work items
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411)  * Unbound workqueues have the following extra attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413)  *  pool_ids	RO int	: the associated pool IDs for each node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414)  *  nice	RW int	: nice value of the workers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415)  *  cpumask	RW mask	: bitmask of allowed CPUs for the workers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416)  *  numa	RW bool	: whether NUMA affinity is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417)  */
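/*
 * For example, a workqueue created with the WQ_SYSFS flag
 * (name illustrative):
 *
 *	wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_SYSFS, 0);
 *
 * shows up as /sys/bus/workqueue/devices/example_wq/ with per_cpu and
 * max_active, plus the unbound-only attributes listed above because
 * WQ_UNBOUND is set.
 */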
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) struct wq_device {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) 	struct workqueue_struct		*wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) 	struct device			dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) static struct workqueue_struct *dev_to_wq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) 	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) 	return wq_dev->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) 			    char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) 	struct workqueue_struct *wq = dev_to_wq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) 	return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) static DEVICE_ATTR_RO(per_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) static ssize_t max_active_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) 			       struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) 	struct workqueue_struct *wq = dev_to_wq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) 	return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) static ssize_t max_active_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) 				struct device_attribute *attr, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) 				size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) 	struct workqueue_struct *wq = dev_to_wq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) 	if (sscanf(buf, "%d", &val) != 1 || val <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) 	workqueue_set_max_active(wq, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) static DEVICE_ATTR_RW(max_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) static struct attribute *wq_sysfs_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) 	&dev_attr_per_cpu.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) 	&dev_attr_max_active.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) ATTRIBUTE_GROUPS(wq_sysfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) static ssize_t wq_pool_ids_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) 				struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) 	struct workqueue_struct *wq = dev_to_wq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) 	const char *delim = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) 	int node, written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) 	get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) 	for_each_node(node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) 		written += scnprintf(buf + written, PAGE_SIZE - written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) 				     "%s%d:%d", delim, node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) 				     unbound_pwq_by_node(wq, node)->pool->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) 		delim = " ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) 	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) 	put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) 	return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) 			    char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) 	struct workqueue_struct *wq = dev_to_wq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) 	int written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) 	written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) 	return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) /* prepare workqueue_attrs for sysfs store operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) 	struct workqueue_attrs *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) 	lockdep_assert_held(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) 	attrs = alloc_workqueue_attrs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) 	if (!attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) 	copy_workqueue_attrs(attrs, wq->unbound_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) 	return attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) 			     const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) 	struct workqueue_struct *wq = dev_to_wq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) 	struct workqueue_attrs *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) 	apply_wqattrs_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) 	attrs = wq_sysfs_prep_attrs(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) 	if (!attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) 	if (sscanf(buf, "%d", &attrs->nice) == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) 	    attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) 		ret = apply_workqueue_attrs_locked(wq, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) 	apply_wqattrs_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) 	free_workqueue_attrs(attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) 	return ret ?: count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) static ssize_t wq_cpumask_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) 			       struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) 	struct workqueue_struct *wq = dev_to_wq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) 	int written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) 	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) 			    cpumask_pr_args(wq->unbound_attrs->cpumask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) 	return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) static ssize_t wq_cpumask_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) 				struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) 				const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) 	struct workqueue_struct *wq = dev_to_wq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) 	struct workqueue_attrs *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) 	apply_wqattrs_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) 	attrs = wq_sysfs_prep_attrs(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) 	if (!attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) 	ret = cpumask_parse(buf, attrs->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) 		ret = apply_workqueue_attrs_locked(wq, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) 	apply_wqattrs_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) 	free_workqueue_attrs(attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) 	return ret ?: count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) 			    char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) 	struct workqueue_struct *wq = dev_to_wq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) 	int written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) 	mutex_lock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) 	written = scnprintf(buf, PAGE_SIZE, "%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) 			    !wq->unbound_attrs->no_numa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) 	mutex_unlock(&wq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) 	return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) 			     const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) 	struct workqueue_struct *wq = dev_to_wq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) 	struct workqueue_attrs *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) 	int v, ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) 	apply_wqattrs_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) 	attrs = wq_sysfs_prep_attrs(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) 	if (!attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) 	ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) 	if (sscanf(buf, "%d", &v) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) 		attrs->no_numa = !v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) 		ret = apply_workqueue_attrs_locked(wq, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) 	apply_wqattrs_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) 	free_workqueue_attrs(attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) 	return ret ?: count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) static struct device_attribute wq_sysfs_unbound_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) 	__ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) 	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) 	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) 	__ATTR(numa, 0644, wq_numa_show, wq_numa_store),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) 	__ATTR_NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) static struct bus_type wq_subsys = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) 	.name				= "workqueue",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) 	.dev_groups			= wq_sysfs_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) static ssize_t wq_unbound_cpumask_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) 		struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) 	int written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) 	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) 			    cpumask_pr_args(wq_unbound_cpumask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) 	return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) static ssize_t wq_unbound_cpumask_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) 		struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) 	cpumask_var_t cpumask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) 	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) 	ret = cpumask_parse(buf, cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) 		ret = workqueue_set_unbound_cpumask(cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) 	free_cpumask_var(cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) 	return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) static struct device_attribute wq_sysfs_cpumask_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) 	__ATTR(cpumask, 0644, wq_unbound_cpumask_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) 	       wq_unbound_cpumask_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) static int __init wq_sysfs_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) 	err = subsys_virtual_register(&wq_subsys, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) 	return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) core_initcall(wq_sysfs_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) static void wq_device_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) 	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) 	kfree(wq_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687)  * workqueue_sysfs_register - make a workqueue visible in sysfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688)  * @wq: the workqueue to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690)  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691)  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692)  * which is the preferred method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694)  * A workqueue user should use this function directly iff it wants to apply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695)  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696)  * apply_workqueue_attrs() may race against userland updating the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697)  * attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699)  * Return: 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) int workqueue_sysfs_register(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) 	struct wq_device *wq_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) 	 * Adjusting max_active or creating new pwqs by applying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) 	 * attributes breaks the ordering guarantee.  Disallow exposing ordered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) 	 * workqueues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) 	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) 	if (!wq_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) 	wq_dev->wq = wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) 	wq_dev->dev.bus = &wq_subsys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) 	wq_dev->dev.release = wq_device_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) 	dev_set_name(&wq_dev->dev, "%s", wq->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) 	 * unbound_attrs are created separately.  Suppress uevent until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) 	 * everything is ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) 	dev_set_uevent_suppress(&wq_dev->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) 	ret = device_register(&wq_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) 		put_device(&wq_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) 		wq->wq_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) 	if (wq->flags & WQ_UNBOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) 		struct device_attribute *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) 		for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) 			ret = device_create_file(&wq_dev->dev, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) 				device_unregister(&wq_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) 				wq->wq_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) 	dev_set_uevent_suppress(&wq_dev->dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) 	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) }
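/*
 * A sketch of the direct-registration pattern described in the comment
 * above (the name and nice value are illustrative): create the workqueue
 * without WQ_SYSFS, apply the desired attributes, then expose it.
 *
 *	struct workqueue_struct *wq;
 *	struct workqueue_attrs *attrs;
 *
 *	wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
 *	attrs = alloc_workqueue_attrs();
 *	if (wq && attrs) {
 *		attrs->nice = -10;
 *		apply_workqueue_attrs(wq, attrs);
 *		workqueue_sysfs_register(wq);
 *	}
 *	free_workqueue_attrs(attrs);
 */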
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755)  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756)  * @wq: the workqueue to unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758)  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) 	struct wq_device *wq_dev = wq->wq_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) 	if (!wq->wq_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) 	wq->wq_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) 	device_unregister(&wq_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) #else	/* CONFIG_SYSFS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) static void workqueue_sysfs_unregister(struct workqueue_struct *wq)	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) #endif	/* CONFIG_SYSFS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775)  * Workqueue watchdog.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777)  * A stall may be caused by various bugs - a missing WQ_MEM_RECLAIM, an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778)  * illegal flush dependency, or a concurrency-managed work item which stays
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779)  * RUNNING indefinitely.  Workqueue stalls can be very difficult to debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780)  * as the usual warning mechanisms don't trigger and internal workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781)  * state is largely opaque.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783)  * The workqueue watchdog monitors all worker pools periodically and dumps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784)  * their state if some pools fail to make forward progress for a while,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785)  * where forward progress is defined as the first item on ->worklist changing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787)  * This mechanism is controlled through the kernel parameter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788)  * "workqueue.watchdog_thresh" which can be updated at runtime through the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789)  * corresponding sysfs parameter file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790)  */
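/*
 * For example, booting with "workqueue.watchdog_thresh=60" raises the
 * stall threshold to 60 seconds, and writing to the parameter file
 * (typically /sys/module/workqueue/parameters/watchdog_thresh) adjusts it
 * at runtime; writing 0 disables the watchdog entirely.  See
 * wq_watchdog_param_set_thresh() and wq_watchdog_set_thresh() below.
 */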
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) #ifdef CONFIG_WQ_WATCHDOG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) static unsigned long wq_watchdog_thresh = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) static struct timer_list wq_watchdog_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) static void wq_watchdog_reset_touched(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) 	wq_watchdog_touched = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) 	for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) static void wq_watchdog_timer_fn(struct timer_list *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) 	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) 	bool lockup_detected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) 	unsigned long now = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) 	int pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) 	if (!thresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) 	for_each_pool(pool, pi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) 		unsigned long pool_ts, touched, ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) 		if (list_empty(&pool->worklist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) 		 * If a virtual machine is stopped by the host, it can look to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) 		 * the watchdog like a stall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) 		kvm_check_and_clear_guest_paused();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) 		/* get the latest of pool and touched timestamps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) 		pool_ts = READ_ONCE(pool->watchdog_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) 		touched = READ_ONCE(wq_watchdog_touched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) 		if (time_after(pool_ts, touched))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) 			ts = pool_ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) 			ts = touched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) 		if (pool->cpu >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) 			unsigned long cpu_touched =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) 				READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) 						  pool->cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) 			if (time_after(cpu_touched, ts))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) 				ts = cpu_touched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) 		/* did we stall? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) 		if (time_after(now, ts + thresh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) 			lockup_detected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) 			pr_emerg("BUG: workqueue lockup - pool");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) 			pr_cont_pool_info(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) 			pr_cont(" stuck for %us!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) 				jiffies_to_msecs(now - pool_ts) / 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) 			trace_android_vh_wq_lockup_pool(pool->cpu, pool_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) 	if (lockup_detected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) 		show_workqueue_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) 	wq_watchdog_reset_touched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) notrace void wq_watchdog_touch(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) 	if (cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) 		wq_watchdog_touched = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) static void wq_watchdog_set_thresh(unsigned long thresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) 	wq_watchdog_thresh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) 	del_timer_sync(&wq_watchdog_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) 	if (thresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) 		wq_watchdog_thresh = thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) 		wq_watchdog_reset_touched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) 		mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) static int wq_watchdog_param_set_thresh(const char *val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) 					const struct kernel_param *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) 	unsigned long thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) 	ret = kstrtoul(val, 0, &thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) 	if (system_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) 		wq_watchdog_set_thresh(thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) 		wq_watchdog_thresh = thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) static const struct kernel_param_ops wq_watchdog_thresh_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) 	.set	= wq_watchdog_param_set_thresh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) 	.get	= param_get_ulong,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) 		0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) static void wq_watchdog_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) 	timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) 	wq_watchdog_set_thresh(wq_watchdog_thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) #else	/* CONFIG_WQ_WATCHDOG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) static inline void wq_watchdog_init(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) #endif	/* CONFIG_WQ_WATCHDOG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) static void __init wq_numa_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) 	cpumask_var_t *tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) 	int node, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) 	if (num_possible_nodes() <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) 	if (wq_disable_numa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) 		pr_info("workqueue: NUMA affinity support disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) 		if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) 			pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) 	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) 	BUG_ON(!wq_update_unbound_numa_attrs_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) 	 * We want per-node masks of possible CPUs, which aren't readily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) 	 * available.  Build them from cpu_to_node(), which should have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) 	 * fully initialized by now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) 	tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) 	BUG_ON(!tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) 	for_each_node(node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) 		BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) 				node_online(node) ? node : NUMA_NO_NODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) 		node = cpu_to_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) 		cpumask_set_cpu(cpu, tbl[node]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) 	wq_numa_possible_cpumask = tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) 	wq_numa_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973)  * workqueue_init_early - early init for workqueue subsystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975)  * This is the first half of two-staged workqueue subsystem initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976)  * and invoked as soon as the bare basics - memory allocation, cpumasks and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977)  * idr are up.  It sets up all the data structures and system workqueues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978)  * and allows early boot code to create workqueues and queue/cancel work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979)  * items.  Actual work item execution starts only after kthreads can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980)  * created and scheduled right before early initcalls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) void __init workqueue_init_early(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) 	int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) 	int i, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) 	BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) 	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) 	/* initialize CPU pools */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) 		struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) 		i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) 		for_each_cpu_worker_pool(pool, cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) 			BUG_ON(init_worker_pool(pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) 			pool->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) 			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) 			pool->attrs->nice = std_nice[i++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) 			pool->node = cpu_to_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) 			/* alloc pool ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) 			mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) 			BUG_ON(worker_pool_assign_id(pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) 			mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) 	/* create default unbound and ordered wq attrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) 		struct workqueue_attrs *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) 		attrs->nice = std_nice[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) 		unbound_std_wq_attrs[i] = attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) 		 * An ordered wq should have only one pwq as ordering is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) 		 * guaranteed by max_active which is enforced by pwqs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) 		 * Turn off NUMA so that dfl_pwq is used for all nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) 		attrs->nice = std_nice[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) 		attrs->no_numa = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) 		ordered_wq_attrs[i] = attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) 	system_wq = alloc_workqueue("events", 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) 	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) 	system_long_wq = alloc_workqueue("events_long", 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) 					    WQ_UNBOUND_MAX_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) 	system_freezable_wq = alloc_workqueue("events_freezable",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) 					      WQ_FREEZABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) 	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) 					      WQ_POWER_EFFICIENT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) 	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) 					      WQ_FREEZABLE | WQ_POWER_EFFICIENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) 					      0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) 	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) 	       !system_unbound_wq || !system_freezable_wq ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) 	       !system_power_efficient_wq ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) 	       !system_freezable_power_efficient_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) }
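/*
 * For example (names illustrative), once workqueue_init_early() has run,
 * early boot code may already do:
 *
 *	static void example_fn(struct work_struct *work)
 *	{
 *		pr_info("example work ran\n");
 *	}
 *	static DECLARE_WORK(example_work, example_fn);
 *	...
 *	schedule_work(&example_work);
 *
 * The work item only sits on the queue at this point; it is executed once
 * workqueue_init() below has created the initial kworkers.
 */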
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052)  * workqueue_init - bring workqueue subsystem fully online
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054)  * This is the latter half of two-staged workqueue subsystem initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055)  * and invoked as soon as kthreads can be created and scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056)  * Workqueues have been created and work items queued on them, but there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057)  * are no kworkers executing the work items yet.  Populate the worker pools
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058)  * with the initial workers and enable future kworker creations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) void __init workqueue_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) 	struct worker_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) 	int cpu, bkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) 	 * It'd be simpler to initialize NUMA in workqueue_init_early() but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) 	 * the CPU to node mapping may not be available that early on some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) 	 * archs such as power and arm64.  As the per-cpu pools created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) 	 * previously could be missing their node hint, and the unbound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) 	 * pools their NUMA affinity, fix them up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) 	 * Also, while iterating workqueues, create rescuers if requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) 	wq_numa_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) 	mutex_lock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) 		for_each_cpu_worker_pool(pool, cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) 			pool->node = cpu_to_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) 	list_for_each_entry(wq, &workqueues, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) 		wq_update_unbound_numa(wq, smp_processor_id(), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) 		WARN(init_rescuer(wq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) 		     "workqueue: failed to create early rescuer for %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) 		     wq->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) 	mutex_unlock(&wq_pool_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) 	/* create the initial workers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) 	for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) 		for_each_cpu_worker_pool(pool, cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) 			pool->flags &= ~POOL_DISASSOCIATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) 			BUG_ON(!create_worker(pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) 	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) 		BUG_ON(!create_worker(pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) 	wq_online = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) 	wq_watchdog_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) }