// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
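
/*
 * Worked example (illustrative, not part of the original source): with
 * pd->cpumask.pcpu = {1, 3, 5} and seq_nr = 7, cpumask_weight() is 3, so
 * cpu_index = 7 % 3 = 1 and padata_index_to_cpu() steps once past the
 * first set bit, i.e. the object is reordered on CPU 3.
 */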

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
			     void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on. If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and,
 *          if none is found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else a negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	} else {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
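
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * embeds struct padata_priv in its own job structure, sets the ->parallel()
 * and ->serial() callbacks and submits the object through a previously
 * allocated padata shell. The identifiers my_job, my_submit, my_parallel,
 * my_serial and my_shell are hypothetical.
 *
 *	struct my_job {
 *		struct padata_priv	padata;
 *		void			*data;
 *	};
 *
 *	static int my_submit(struct padata_shell *my_shell,
 *			     struct my_job *job, int cb_cpu)
 *	{
 *		job->padata.parallel = my_parallel;	// runs with BHs off
 *		job->padata.serial = my_serial;		// runs later on cb_cpu
 *		return padata_do_parallel(my_shell, &job->padata, &cb_cpu);
 *	}
 */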

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the hold time of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, there is nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure the reorder queue is read after pd->lock is dropped so we
	 * see new objects from another task in padata_do_serial(). Pairs
	 * with the smp_mb() in padata_do_serial().
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (atomic_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_entry_reverse(cur, &reorder->list, list)
		if (cur->seq_nr < padata->seq_nr)
			break;
	list_add(&padata->list, &cur->list);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
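
/*
 * Continuing the sketch above (illustrative only): the parallel callback
 * finishes by handing the object back with padata_do_serial(), after which
 * the serial callback runs on the requested callback CPU in submission
 * order. my_job, my_parallel, my_serial, process_in_parallel and
 * complete_in_order are hypothetical names.
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_job *job = container_of(padata, struct my_job, padata);
 *
 *		process_in_parallel(job->data);
 *		padata_do_serial(padata);
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_job *job = container_of(padata, struct my_job, padata);
 *
 *		complete_in_order(job->data);
 *	}
 */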

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / job->min_chunk, 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function. Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = roundup(ps.chunk_size, job->align);

	list_for_each_entry(pw, &works, pw_list)
		queue_work(system_unbound_wq, &pw->pw_work);

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
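
/*
 * Usage sketch (illustrative only): a boot-time caller describes the range
 * to process and the helper that handles one chunk, then lets padata spread
 * the work over the unbound workqueue. init_one_range, init_items, my_state,
 * first_index and nr_items are hypothetical names; the field values are just
 * examples.
 *
 *	static void __init init_one_range(unsigned long start,
 *					  unsigned long end, void *arg)
 *	{
 *		struct my_state *state = arg;
 *
 *		init_items(state, start, end);
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn	= init_one_range,
 *		.fn_arg		= &my_state,
 *		.start		= first_index,
 *		.size		= nr_items,
 *		.align		= 1,
 *		.min_chunk	= 1024,
 *		.max_threads	= num_online_cpus(),
 *	};
 *
 *	padata_do_multithreaded(&job);
 */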

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	atomic_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (atomic_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, corresponding
 *                to the serial and parallel cpumasks respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or a negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	get_online_cpus();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	put_online_cpus();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
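
/*
 * Usage sketch (illustrative only): a caller typically builds a temporary
 * cpumask and applies it as the parallel or serial mask of an existing
 * instance. my_pinst is a hypothetical, previously allocated instance.
 *
 *	cpumask_var_t mask;
 *	int err;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_copy(mask, cpu_online_mask);
 *	err = padata_set_cpumask(my_pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */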

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ssize_t (*store)(struct padata_instance *, struct attribute *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) const char *, size_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static ssize_t show_cpumask(struct padata_instance *pinst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct cpumask *cpumask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) ssize_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) mutex_lock(&pinst->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (!strcmp(attr->name, "serial_cpumask"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) cpumask = pinst->cpumask.cbcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) cpumask = pinst->cpumask.pcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) len = snprintf(buf, PAGE_SIZE, "%*pb\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) nr_cpu_ids, cpumask_bits(cpumask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) mutex_unlock(&pinst->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return len < PAGE_SIZE ? len : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static ssize_t store_cpumask(struct padata_instance *pinst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) cpumask_var_t new_cpumask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) int mask_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) nr_cpumask_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) mask_type = !strcmp(attr->name, "serial_cpumask") ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ret = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) free_cpumask_var(new_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) #define PADATA_ATTR_RW(_name, _show_name, _store_name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) static struct padata_sysfs_entry _name##_attr = \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) __ATTR(_name, 0644, _show_name, _store_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) #define PADATA_ATTR_RO(_name, _show_name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static struct padata_sysfs_entry _name##_attr = \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) __ATTR(_name, 0400, _show_name, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * Padata sysfs provides the following objects:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * serial_cpumask [RW] - cpumask for serial workers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * parallel_cpumask [RW] - cpumask for parallel workers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) static struct attribute *padata_default_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) &serial_cpumask_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) ¶llel_cpumask_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ATTRIBUTE_GROUPS(padata_default);
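/*
 * Illustrative sketch, not part of the original file: the sysfs store
 * path above ends up in padata_set_cpumask(), so an in-kernel caller
 * could restrict the parallel workers the same way without going
 * through sysfs.  The instance pointer and the chosen mask are
 * hypothetical; error handling is kept minimal.
 */
static int __maybe_unused padata_example_restrict_parallel(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* For example, keep parallel work off CPU 0. */
	cpumask_copy(mask, cpu_online_mask);
	cpumask_clear_cpu(0, mask);

	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

	free_cpumask_var(mask);
	return err;
}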
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static ssize_t padata_sysfs_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct padata_instance *pinst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct padata_sysfs_entry *pentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ssize_t ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) pinst = kobj2pinst(kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) pentry = attr2pentry(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (pentry->show)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) ret = pentry->show(pinst, attr, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct padata_instance *pinst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct padata_sysfs_entry *pentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ssize_t ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) pinst = kobj2pinst(kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) pentry = attr2pentry(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (pentry->store)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) ret = pentry->store(pinst, attr, buf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static const struct sysfs_ops padata_sysfs_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) .show = padata_sysfs_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) .store = padata_sysfs_store,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) static struct kobj_type padata_attr_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) .sysfs_ops = &padata_sysfs_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) .default_groups = padata_default_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .release = padata_sysfs_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * padata_alloc - allocate and initialize a padata instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * @name: used to identify the instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * Return: new instance on success, NULL on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct padata_instance *padata_alloc(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct padata_instance *pinst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (!pinst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (!pinst->parallel_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) goto err_free_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) WQ_CPU_INTENSIVE, 1, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (!pinst->serial_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) goto err_put_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) goto err_free_serial_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) free_cpumask_var(pinst->cpumask.pcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) goto err_free_serial_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) INIT_LIST_HEAD(&pinst->pslist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (padata_setup_cpumasks(pinst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) goto err_free_masks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) __padata_start(pinst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) kobject_init(&pinst->kobj, &padata_attr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) mutex_init(&pinst->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) &pinst->cpu_online_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) &pinst->cpu_dead_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return pinst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) err_free_masks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) free_cpumask_var(pinst->cpumask.pcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) free_cpumask_var(pinst->cpumask.cbcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) err_free_serial_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) destroy_workqueue(pinst->serial_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) err_put_cpus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) destroy_workqueue(pinst->parallel_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) err_free_inst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) kfree(pinst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) EXPORT_SYMBOL(padata_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * padata_free - free a padata instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * @pinst: padata instance to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) void padata_free(struct padata_instance *pinst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) kobject_put(&pinst->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) EXPORT_SYMBOL(padata_free);
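/*
 * Usage sketch, not part of the original file: a hypothetical client
 * creating an instance at init time and dropping it again on exit.
 * The "example" name is made up; callers that want the cpumask
 * attributes visible in sysfs additionally register pinst->kobj under
 * their own kobject (pcrypt does this, for instance).
 */
static __maybe_unused struct padata_instance *padata_example_setup(void)
{
	struct padata_instance *pinst;

	pinst = padata_alloc("example");
	if (!pinst)
		return NULL;

	/* ... allocate shells with padata_alloc_shell() and submit work ... */

	return pinst;
}

static void __maybe_unused padata_example_teardown(struct padata_instance *pinst)
{
	/*
	 * Drops the reference the instance's kobject was initialized with,
	 * which frees the instance via padata_sysfs_release().
	 */
	padata_free(pinst);
}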
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * padata_alloc_shell - Allocate and initialize padata shell.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * @pinst: Parent padata_instance object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * Return: new shell on success, NULL on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct parallel_data *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct padata_shell *ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ps = kzalloc(sizeof(*ps), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (!ps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) ps->pinst = pinst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) pd = padata_alloc_pd(ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (!pd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) goto out_free_ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) mutex_lock(&pinst->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) RCU_INIT_POINTER(ps->pd, pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) list_add(&ps->list, &pinst->pslist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) mutex_unlock(&pinst->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) out_free_ps:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) kfree(ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) EXPORT_SYMBOL(padata_alloc_shell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * padata_free_shell - free a padata shell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * @ps: padata shell to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) void padata_free_shell(struct padata_shell *ps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (!ps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) mutex_lock(&ps->pinst->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) list_del(&ps->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) padata_free_pd(rcu_dereference_protected(ps->pd, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) mutex_unlock(&ps->pinst->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) kfree(ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) EXPORT_SYMBOL(padata_free_shell);
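/*
 * Usage sketch, not part of the original file: one way a client could
 * drive a shell.  Embedding struct padata_priv in a private job and
 * wiring up the parallel/serial callbacks follows the pattern from
 * Documentation/core-api/padata.rst; the job structure and the callback
 * bodies below are hypothetical.  The job is assumed to come from
 * kzalloc() so the embedded padata_priv starts out zeroed.
 */
struct padata_example_job {
	struct padata_priv padata;	/* must be embedded, not a pointer */
	/* ... caller-specific state ... */
};

static void padata_example_parallel(struct padata_priv *padata)
{
	/*
	 * Runs on a CPU from the parallel cpumask.  Do the heavy lifting
	 * here, then hand the job back for in-order completion.
	 */
	padata_do_serial(padata);
}

static void padata_example_serial(struct padata_priv *padata)
{
	/* Runs in submission order on a CPU from the serial cpumask. */
}

static int __maybe_unused padata_example_submit(struct padata_shell *ps,
						struct padata_example_job *job)
{
	/* Callback CPU hint; padata falls back to the serial cpumask. */
	int cb_cpu = cpumask_first(cpu_online_mask);

	job->padata.parallel = padata_example_parallel;
	job->padata.serial = padata_example_serial;

	/* Returns 0 once the job is queued, or a negative errno. */
	return padata_do_parallel(ps, &job->padata, &cb_cpu);
}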
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) void __init padata_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) unsigned int i, possible_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) padata_cpu_online, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) hp_online = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) NULL, padata_cpu_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) goto remove_online_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) possible_cpus = num_possible_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (!padata_works)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) goto remove_dead_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) for (i = 0; i < possible_cpus; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) list_add(&padata_works[i].pw_list, &padata_free_works);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) remove_dead_state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) remove_online_state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) cpuhp_remove_multi_state(hp_online);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) pr_warn("padata: initialization failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }