// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>


static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int (*threadfn)(void *);
	void *data;
	mm_segment_t oldfs;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the fact
	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * By construction, when:
 *
 *   (p->flags & PF_KTHREAD) && p->set_child_tid
 *
 * the task is both a kthread and its struct kthread is persistent. However,
 * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
 * begin_new_exec()).
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = (__force void *)p->set_child_tid;
	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
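
/*
 * Example usage (illustrative sketch): the typical shape of a thread
 * function built around kthread_should_stop(). my_threadfn(), my_data and
 * do_one_item() are placeholders for whatever the caller provides; the
 * value returned from the loop is what kthread_stop() reports back.
 *
 *	static int my_threadfn(void *arg)
 *	{
 *		struct my_data *d = arg;
 *
 *		while (!kthread_should_stop()) {
 *			do_one_item(d);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */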

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * the refrigerator if necessary. This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
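
/*
 * Example usage (illustrative sketch): a freezable kthread calls
 * set_freezable() once and then uses kthread_freezable_should_stop() as its
 * loop condition, so it both honours the freezer and stays stoppable via
 * kthread_stop(). my_freezable_fn() and my_work() are placeholders.
 *
 *	static int my_freezable_fn(void *arg)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (was_frozen)
 *				pr_debug("thawed after suspend\n");
 *			my_work(arg);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */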

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task. Return the data value specified when it
 * was created if accessible. If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned. This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision could result in the task state changing
		 * from TASK_PARKED and us failing the wait_task_inactive()
		 * in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
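
/*
 * Example usage (illustrative sketch): a park-aware thread function checks
 * kthread_should_park() in its loop and calls kthread_parkme() when asked;
 * a later kthread_unpark() lets the loop continue. my_percpu_fn() and
 * my_step() are placeholders.
 *
 *	static int my_percpu_fn(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			my_step(arg);
 *		}
 *		return 0;
 *	}
 */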

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->threadfn = threadfn;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for the
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task,
				     housekeeping_cpumask(HK_FLAG_KTHREAD));
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular CPU, give the CPU's node
 * in @node to get NUMA affinity for the kthread stack; otherwise, give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
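
/*
 * Example usage (illustrative sketch): create a thread whose stack is
 * allocated on the node of the CPU it will mostly serve, then start it
 * explicitly; kthread_run() wraps the same call and wakes the thread
 * immediately. my_threadfn, my_data and cpu are placeholders.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_threadfn, my_data, cpu_to_node(cpu),
 *				     "my_worker/%d", cpu);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 */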

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(kthread_bind_mask);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu to which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * bound to @cpu. As with kthread_create_on_node(), the thread is created
 * in a stopped state; use wake_up_process() to start it.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);
	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it.
 * If the thread is marked percpu then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself, just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(); this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
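
/*
 * Example usage (illustrative sketch): a CPU-bound thread created with
 * kthread_create_on_cpu() can be parked while its CPU goes down and
 * unparked when it comes back; kthread_unpark() re-binds per-cpu threads.
 * my_percpu_fn, my_data and cpu are placeholders, and the thread function
 * is assumed to check kthread_should_park() and call kthread_parkme().
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_cpu(my_percpu_fn, my_data, cpu, "my_thread/%u");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *
 *	kthread_park(tsk);	(before the CPU is taken offline)
 *	kthread_unpark(tsk);	(once the CPU is back online)
 */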

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
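
/*
 * Example usage (illustrative sketch): the full lifecycle of a simple
 * kthread. kthread_run() creates and wakes the thread in one step;
 * kthread_stop() later sets the stop bit, wakes the thread and returns
 * whatever the thread function returned. my_threadfn and my_data are
 * placeholders.
 *
 *	struct task_struct *tsk;
 *	int ret;
 *
 *	tsk = kthread_run(my_threadfn, my_data, "my_daemon");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *
 *	ret = kthread_stop(tsk);	(ret is my_threadfn's return value)
 */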

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Set up a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * the work_list until it is stopped with kthread_stop(). It sleeps when the
 * queue is empty.
 *
 * Work items must not hold any locks or leave preemption or interrupts
 * disabled when they finish. A safe point for freezing is provided after one
 * work item finishes and before the next one is started.
 *
 * Also, a work item must not be handled by more than one worker at the same
 * time; see also kthread_queue_work().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) int kthread_worker_fn(void *worker_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) struct kthread_worker *worker = worker_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) struct kthread_work *work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * FIXME: Update the check and remove the assignment when all kthread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * worker users are created using kthread_create_worker*() functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) WARN_ON(worker->task && worker->task != current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) worker->task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (worker->flags & KTW_FREEZABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) set_freezable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) raw_spin_lock_irq(&worker->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) worker->task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) raw_spin_unlock_irq(&worker->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) work = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) raw_spin_lock_irq(&worker->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (!list_empty(&worker->work_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) work = list_first_entry(&worker->work_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) struct kthread_work, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) list_del_init(&work->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) worker->current_work = work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) raw_spin_unlock_irq(&worker->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (work) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) work->func(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) } else if (!freezing(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) try_to_freeze();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) EXPORT_SYMBOL_GPL(kthread_worker_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) static __printf(3, 0) struct kthread_worker *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) __kthread_create_worker(int cpu, unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) const char namefmt[], va_list args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct kthread_worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) int node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) worker = kzalloc(sizeof(*worker), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (!worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) kthread_init_worker(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) node = cpu_to_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) task = __kthread_create_on_node(kthread_worker_fn, worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) node, namefmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (IS_ERR(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) goto fail_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) kthread_bind(task, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) worker->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) worker->task = task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) wake_up_process(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) fail_task:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) kfree(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return ERR_CAST(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * kthread_create_worker - create a kthread worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * @flags: flags modifying the default behavior of the worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * @namefmt: printf-style name for the kthread worker (task).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * when the worker was SIGKILLed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct kthread_worker *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) kthread_create_worker(unsigned int flags, const char namefmt[], ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct kthread_worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) va_start(args, namefmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) worker = __kthread_create_worker(-1, flags, namefmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) EXPORT_SYMBOL(kthread_create_worker);
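
/*
 * Illustrative sketch (hypothetical helper name): callers are expected to
 * check the ERR_PTR() convention described above.
 */
static struct kthread_worker *example_create_worker(void)
{
        struct kthread_worker *worker;

        worker = kthread_create_worker(0, "example/%d", 0);
        if (IS_ERR(worker))
                pr_err("worker creation failed: %ld\n", PTR_ERR(worker));
        return worker;
}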
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * kthread_create_worker_on_cpu - create a kthread worker and bind it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * to a given CPU and the associated NUMA node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * @cpu: CPU number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * @flags: flags modifying the default behavior of the worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * @namefmt: printf-style name for the kthread worker (task).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * Use a valid CPU number if you want to bind the kthread worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * to the given CPU and the associated NUMA node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * A good practice is to also include the CPU number in the worker name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * when the worker was SIGKILLed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct kthread_worker *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) kthread_create_worker_on_cpu(int cpu, unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) const char namefmt[], ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct kthread_worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) va_start(args, namefmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) worker = __kthread_create_worker(cpu, flags, namefmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) EXPORT_SYMBOL(kthread_create_worker_on_cpu);
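
/*
 * Illustrative sketch (hypothetical names, CPU hotplug ignored for brevity):
 * one bound worker per online CPU, following the naming advice above.
 */
static struct kthread_worker *example_cpu_workers[NR_CPUS];

static int example_create_cpu_workers(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                struct kthread_worker *worker;

                worker = kthread_create_worker_on_cpu(cpu, 0,
                                                      "example/%d", cpu);
                if (IS_ERR(worker))
                        return PTR_ERR(worker);
                example_cpu_workers[cpu] = worker;
        }
        return 0;
}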
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * Returns true when the work could not be queued at the moment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * This happens when it is already pending on a worker list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * or when it is being canceled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static inline bool queuing_blocked(struct kthread_worker *worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct kthread_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) lockdep_assert_held(&worker->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return !list_empty(&work->node) || work->canceling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct kthread_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) lockdep_assert_held(&worker->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) WARN_ON_ONCE(!list_empty(&work->node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* Do not use a work with >1 worker, see kthread_queue_work() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) WARN_ON_ONCE(work->worker && work->worker != worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* insert @work before @pos in @worker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static void kthread_insert_work(struct kthread_worker *worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct kthread_work *work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct list_head *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) kthread_insert_work_sanity_check(worker, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) list_add_tail(&work->node, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) work->worker = worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (!worker->current_work && likely(worker->task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) wake_up_process(worker->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * kthread_queue_work - queue a kthread_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * @worker: target kthread_worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * @work: kthread_work to queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * Queue @work to @worker for async execution. @worker must have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * created with kthread_create_worker(). Returns %true if @work was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * successfully queued, %false if it was already pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * Reinitialize the work if it needs to be used by another worker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * For example, when the worker was stopped and started again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) bool kthread_queue_work(struct kthread_worker *worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct kthread_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) raw_spin_lock_irqsave(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (!queuing_blocked(worker, work)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) kthread_insert_work(worker, work, &worker->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) raw_spin_unlock_irqrestore(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) EXPORT_SYMBOL_GPL(kthread_queue_work);
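
/*
 * Illustrative sketch (hypothetical structure and handler): a kthread_work is
 * typically embedded in a driver-private object, recovered with
 * container_of() in the handler, and queued on an already created worker.
 */
struct example_device {
        struct kthread_work refresh_work;
        int pending_value;
};

static void example_refresh_fn(struct kthread_work *work)
{
        struct example_device *dev =
                container_of(work, struct example_device, refresh_work);

        /* Runs in the worker's kthread context. */
        dev->pending_value = 0;
}

static void example_device_setup(struct example_device *dev)
{
        kthread_init_work(&dev->refresh_work, example_refresh_fn);
}

static void example_trigger_refresh(struct kthread_worker *worker,
                                    struct example_device *dev)
{
        if (!kthread_queue_work(worker, &dev->refresh_work))
                pr_debug("refresh already pending\n");
}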
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * kthread_delayed_work_timer_fn - callback that queues the associated kthread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * delayed work when the timer expires.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * @t: pointer to the expired timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * The format of the function is defined by struct timer_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * It is called from an irq-safe timer, so interrupts are already disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) void kthread_delayed_work_timer_fn(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct kthread_work *work = &dwork->work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct kthread_worker *worker = work->worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * This might happen when a pending work is reinitialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * It means that the work is being used incorrectly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (WARN_ON_ONCE(!worker))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) raw_spin_lock_irqsave(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* Work must not be used with >1 worker, see kthread_queue_work(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) WARN_ON_ONCE(work->worker != worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* Move the work from worker->delayed_work_list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) WARN_ON_ONCE(list_empty(&work->node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) list_del_init(&work->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (!work->canceling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) kthread_insert_work(worker, work, &worker->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) raw_spin_unlock_irqrestore(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static void __kthread_queue_delayed_work(struct kthread_worker *worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct kthread_delayed_work *dwork,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct timer_list *timer = &dwork->timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct kthread_work *work = &dwork->work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * With CFI, timer->function can point to a jump table entry in a module,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * which fails the comparison. Disable the warning if CFI and modules are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * both enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (!IS_ENABLED(CONFIG_CFI_CLANG) || !IS_ENABLED(CONFIG_MODULES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * If @delay is 0, queue @dwork->work immediately. This is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * both optimization and correctness. The earliest @timer can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * expire is on the closest next tick, and delayed_work users depend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * on there being no such delay when @delay is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (!delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) kthread_insert_work(worker, work, &worker->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* Be paranoid and try to detect possible races already now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) kthread_insert_work_sanity_check(worker, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) list_add(&work->node, &worker->delayed_work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) work->worker = worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) timer->expires = jiffies + delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) add_timer(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * kthread_queue_delayed_work - queue the associated kthread work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * after a delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * @worker: target kthread_worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * @dwork: kthread_delayed_work to queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * @delay: number of jiffies to wait before queuing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * If the work is not already pending, this starts a timer that will queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * the work after the given @delay. If @delay is zero, it queues the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * work immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * Return: %false if @work was already pending, meaning that either the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * timer was running or the work was queued. Returns %true otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) bool kthread_queue_delayed_work(struct kthread_worker *worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct kthread_delayed_work *dwork,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct kthread_work *work = &dwork->work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) raw_spin_lock_irqsave(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (!queuing_blocked(worker, work)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) __kthread_queue_delayed_work(worker, dwork, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) raw_spin_unlock_irqrestore(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
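
/*
 * Illustrative sketch (hypothetical names): a self re-arming poller built on
 * kthread_delayed_work, queued with a delay expressed in jiffies.
 */
struct example_poller {
        struct kthread_worker *worker;
        struct kthread_delayed_work poll_work;
};

static void example_poll_fn(struct kthread_work *work)
{
        struct example_poller *p =
                container_of(work, struct example_poller, poll_work.work);

        /* ... sample the hardware ..., then re-arm in roughly 100ms. */
        kthread_queue_delayed_work(p->worker, &p->poll_work,
                                   msecs_to_jiffies(100));
}

static void example_start_polling(struct kthread_worker *worker,
                                  struct example_poller *p)
{
        p->worker = worker;
        kthread_init_delayed_work(&p->poll_work, example_poll_fn);
        kthread_queue_delayed_work(worker, &p->poll_work,
                                   msecs_to_jiffies(100));
}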
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct kthread_flush_work {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct kthread_work work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct completion done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static void kthread_flush_work_fn(struct kthread_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct kthread_flush_work *fwork =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) container_of(work, struct kthread_flush_work, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) complete(&fwork->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * kthread_flush_work - flush a kthread_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * @work: work to flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * If @work is queued or executing, wait for it to finish execution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) void kthread_flush_work(struct kthread_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct kthread_flush_work fwork = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) COMPLETION_INITIALIZER_ONSTACK(fwork.done),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct kthread_worker *worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) bool noop = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) worker = work->worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) raw_spin_lock_irq(&worker->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* Work must not be used with >1 worker, see kthread_queue_work(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) WARN_ON_ONCE(work->worker != worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (!list_empty(&work->node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) kthread_insert_work(worker, &fwork.work, work->node.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) else if (worker->current_work == work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) kthread_insert_work(worker, &fwork.work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) worker->work_list.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) noop = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) raw_spin_unlock_irq(&worker->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (!noop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) wait_for_completion(&fwork.done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) EXPORT_SYMBOL_GPL(kthread_flush_work);
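
/*
 * Illustrative sketch (hypothetical names): queue a pass and wait for it, so
 * that the caller knows the work has run to completion before returning.
 */
static void example_apply_config(struct kthread_worker *worker,
                                 struct kthread_work *config_work)
{
        if (!kthread_queue_work(worker, config_work))
                pr_debug("config pass already pending\n");
        /* Wait for the pending or just queued pass to finish. */
        kthread_flush_work(config_work);
}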
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * Make sure that the timer is neither set nor running and can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * no longer manipulate the work list_head.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * The function is called under worker->lock. The lock is temporarily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * released but the timer can't be set again in the meantime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct kthread_delayed_work *dwork =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) container_of(work, struct kthread_delayed_work, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct kthread_worker *worker = work->worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * del_timer_sync() must be called to make sure that the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * callback is not running. The lock must be temporarily released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * to avoid a deadlock with the callback. In the meantime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * any queuing is blocked by setting the canceling counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) work->canceling++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) raw_spin_unlock_irqrestore(&worker->lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) del_timer_sync(&dwork->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) raw_spin_lock_irqsave(&worker->lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) work->canceling--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * This function removes the work from the worker queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * It is called under worker->lock. The caller must make sure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * the timer used by delayed work is not running, e.g. by calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * kthread_cancel_delayed_work_timer().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * The work might still be in use when this function finishes. See the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * current_work processed by the worker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * Return: %true if @work was pending and successfully canceled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * %false if @work was not pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static bool __kthread_cancel_work(struct kthread_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * Try to remove the work from a worker list. It might either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * be from worker->work_list or from worker->delayed_work_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (!list_empty(&work->node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) list_del_init(&work->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * @worker: kthread worker to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * @dwork: kthread delayed work to queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * @delay: number of jiffies to wait before queuing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * modify @dwork's timer so that it expires after @delay. If @delay is zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * @dwork is guaranteed to be queued immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * Return: %false if @dwork was idle and queued, %true otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * A special case is when the work is being canceled in parallel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * It might be caused either by a real kthread_cancel_delayed_work_sync()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * or by another kthread_mod_delayed_work() call. We let the other command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * win and return %true here. The return value can be used for reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * counting because the number of queued works stays the same. Anyway, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * caller is supposed to synchronize these operations in a reasonable way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * This function is safe to call from any context including IRQ handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) bool kthread_mod_delayed_work(struct kthread_worker *worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct kthread_delayed_work *dwork,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct kthread_work *work = &dwork->work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) raw_spin_lock_irqsave(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /* Do not bother with canceling when never queued. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (!work->worker) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) goto fast_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* Work must not be used with >1 worker, see kthread_queue_work() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) WARN_ON_ONCE(work->worker != worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * Temporarily cancel the work, but do not fight with another command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * that is canceling the work as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * It is a bit tricky because of possible races with another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * mod_delayed_work() and cancel_delayed_work() callers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * The timer must be canceled first because worker->lock is released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * when doing so. But the work can be removed from the queue (list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * only when it can be queued again so that the return value can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * be used for reference counting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) kthread_cancel_delayed_work_timer(work, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (work->canceling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /* The number of works in the queue does not change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) ret = __kthread_cancel_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * Canceling could run in parallel from kthread_cancel_delayed_work_sync()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * and change the work's canceling count while the spinlock was released and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) * re-acquired in kthread_cancel_delayed_work_timer(), so we need to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * the count again. Otherwise, we might incorrectly queue the dwork and make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * the cancel_delayed_work_sync() thread wait endlessly for the dwork to be flushed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (work->canceling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) fast_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) __kthread_queue_delayed_work(worker, dwork, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) raw_spin_unlock_irqrestore(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
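
/*
 * Illustrative sketch (hypothetical names): kthread_mod_delayed_work() is a
 * natural fit for debouncing, e.g. pushing a timeout further out on every
 * event regardless of whether the work was already pending.
 */
static void example_touch_watchdog(struct kthread_worker *worker,
                                   struct kthread_delayed_work *timeout_work)
{
        kthread_mod_delayed_work(worker, timeout_work,
                                 msecs_to_jiffies(1000));
}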
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct kthread_worker *worker = work->worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) int ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (!worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) raw_spin_lock_irqsave(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /* Work must not be used with >1 worker, see kthread_queue_work(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) WARN_ON_ONCE(work->worker != worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (is_dwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) kthread_cancel_delayed_work_timer(work, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) ret = __kthread_cancel_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (worker->current_work != work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) goto out_fast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * The work is in progress and we need to wait with the lock released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * In the meantime, block any queuing by setting the canceling counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) work->canceling++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) raw_spin_unlock_irqrestore(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) kthread_flush_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) raw_spin_lock_irqsave(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) work->canceling--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) out_fast:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) raw_spin_unlock_irqrestore(&worker->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * @work: the kthread work to cancel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * Cancel @work and wait for its execution to finish. This function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * can be used even if the work re-queues itself. On return from this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * function, @work is guaranteed to be not pending or executing on any CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * kthread_cancel_work_sync(&delayed_work->work) must not be used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * delayed works. Use kthread_cancel_delayed_work_sync() instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * The caller must ensure that the worker on which @work was last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * queued can't be destroyed before this function returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * Return: %true if @work was pending, %false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) bool kthread_cancel_work_sync(struct kthread_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return __kthread_cancel_work_sync(work, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * wait for it to finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * @dwork: the kthread delayed work to cancel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * This is kthread_cancel_work_sync() for delayed works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * Return: %true if @dwork was pending, %false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return __kthread_cancel_work_sync(&dwork->work, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
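
/*
 * Illustrative sketch (hypothetical names): a typical teardown path cancels
 * outstanding works before freeing the objects that embed them.
 */
static void example_shutdown_works(struct kthread_work *io_work,
                                   struct kthread_delayed_work *poll_work)
{
        kthread_cancel_work_sync(io_work);
        kthread_cancel_delayed_work_sync(poll_work);
        /* Neither work is pending or running any more; safe to free them. */
}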
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * kthread_flush_worker - flush all current works on a kthread_worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * @worker: worker to flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * Wait until all currently executing or pending works on @worker are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) void kthread_flush_worker(struct kthread_worker *worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) struct kthread_flush_work fwork = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) COMPLETION_INITIALIZER_ONSTACK(fwork.done),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) kthread_queue_work(worker, &fwork.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) wait_for_completion(&fwork.done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) EXPORT_SYMBOL_GPL(kthread_flush_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) * kthread_destroy_worker - destroy a kthread worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * @worker: worker to be destroyed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * Flush and destroy @worker. The simple flush is enough because the kthread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * worker API is used only in trivial scenarios. There are no multi-step state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * machines needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) void kthread_destroy_worker(struct kthread_worker *worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) task = worker->task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (WARN_ON(!task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) kthread_flush_worker(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) kthread_stop(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) WARN_ON(!list_empty(&worker->work_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) kfree(worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) EXPORT_SYMBOL(kthread_destroy_worker);
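
/*
 * Illustrative sketch (hypothetical names): kthread_destroy_worker() already
 * flushes queued works and stops the kthread, so only works that could
 * re-queue themselves later need an explicit cancel first.
 */
static void example_worker_teardown(struct kthread_worker *worker,
                                    struct kthread_delayed_work *poll_work)
{
        kthread_cancel_delayed_work_sync(poll_work);    /* stop re-arming */
        kthread_destroy_worker(worker);
}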
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * kthread_use_mm - make the calling kthread operate on an address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * @mm: address space to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) void kthread_use_mm(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct mm_struct *active_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) struct task_struct *tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) WARN_ON_ONCE(tsk->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) task_lock(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /* Hold off tlb flush IPIs while switching mm's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) active_mm = tsk->active_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (active_mm != mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) mmgrab(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) tsk->active_mm = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) tsk->mm = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) switch_mm_irqs_off(active_mm, mm, tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) task_unlock(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) #ifdef finish_arch_post_lock_switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) finish_arch_post_lock_switch();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (active_mm != mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) mmdrop(active_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) to_kthread(tsk)->oldfs = force_uaccess_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) EXPORT_SYMBOL_GPL(kthread_use_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * kthread_unuse_mm - reverse the effect of kthread_use_mm()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * @mm: address space to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) void kthread_unuse_mm(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct task_struct *tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) WARN_ON_ONCE(!tsk->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) force_uaccess_end(to_kthread(tsk)->oldfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) task_lock(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) sync_mm_rss(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) tsk->mm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /* active_mm is still 'mm' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) enter_lazy_tlb(mm, tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) task_unlock(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) EXPORT_SYMBOL_GPL(kthread_unuse_mm);
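
/*
 * Illustrative sketch (hypothetical names): a kthread acting on behalf of a
 * user process temporarily adopts that process' mm so that copy_to_user()
 * resolves against the right address space. It is assumed that mm was saved
 * from the originating task and might already be exiting.
 */
static int example_post_result(struct mm_struct *mm, u32 __user *uptr, u32 val)
{
        int ret = -ESRCH;

        if (mmget_not_zero(mm)) {
                kthread_use_mm(mm);
                ret = copy_to_user(uptr, &val, sizeof(val)) ? -EFAULT : 0;
                kthread_unuse_mm(mm);
                mmput(mm);
        }
        return ret;
}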
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) #ifdef CONFIG_BLK_CGROUP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * kthread_associate_blkcg - associate blkcg to current kthread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * @css: the cgroup info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * The current thread must be a kthread. The thread is running jobs on behalf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * of other threads. In some cases, we expect the jobs to attach the cgroup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * info of the original threads instead of that of the current thread. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * function stores the original thread's cgroup info in the current kthread's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * context for later retrieval.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) void kthread_associate_blkcg(struct cgroup_subsys_state *css)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) struct kthread *kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!(current->flags & PF_KTHREAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) kthread = to_kthread(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (!kthread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (kthread->blkcg_css) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) css_put(kthread->blkcg_css);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) kthread->blkcg_css = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (css) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) css_get(css);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) kthread->blkcg_css = css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) EXPORT_SYMBOL(kthread_associate_blkcg);
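
/*
 * Illustrative sketch (hypothetical names): a worker thread doing I/O on
 * behalf of another task charges that I/O to the originating cgroup by
 * associating its blkcg css around the submission. The css is assumed to
 * have been saved, with a reference held, when the request was handed over.
 */
static void example_submit_on_behalf(struct cgroup_subsys_state *css)
{
        kthread_associate_blkcg(css);   /* subsequent I/O is charged to css */
        /* ... submit_bio() etc. on behalf of the original task ... */
        kthread_associate_blkcg(NULL);  /* drop the association again */
}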
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * kthread_blkcg - get associated blkcg css of current kthread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * Current thread must be a kthread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) struct cgroup_subsys_state *kthread_blkcg(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct kthread *kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (current->flags & PF_KTHREAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) kthread = to_kthread(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (kthread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return kthread->blkcg_css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) EXPORT_SYMBOL(kthread_blkcg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) #endif