Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 tree for the OrangePi 5/5B/5+ boards

Blame of the auto-group scheduling implementation (kernel/sched/autogroup.c in the mainline layout); every line is from commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300):
// SPDX-License-Identifier: GPL-2.0
/*
 * Auto-group scheduling implementation:
 */
#include <linux/nospec.h>
#include "sched.h"

unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;

void __init autogroup_init(struct task_struct *init_task)
{
	autogroup_default.tg = &root_task_group;
	kref_init(&autogroup_default.kref);
	init_rwsem(&autogroup_default.lock);
	init_task->signal->autogroup = &autogroup_default;
}

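/* Release the autogroup descriptor once its task_group is being freed. */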
void autogroup_free(struct task_group *tg)
{
	kfree(tg->autogroup);
}

static inline void autogroup_destroy(struct kref *kref)
{
	struct autogroup *ag = container_of(kref, struct autogroup, kref);

#ifdef CONFIG_RT_GROUP_SCHED
	/* We've redirected RT tasks to the root task group... */
	ag->tg->rt_se = NULL;
	ag->tg->rt_rq = NULL;
#endif
	sched_offline_group(ag->tg);
	sched_destroy_group(ag->tg);
}

static inline void autogroup_kref_put(struct autogroup *ag)
{
	kref_put(&ag->kref, autogroup_destroy);
}

static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
	kref_get(&ag->kref);
	return ag;
}

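/*
 * Take a reference on @p's autogroup under ->siglock; if the sighand is
 * already gone (@p is being released), fall back to the default autogroup.
 */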
static inline struct autogroup *autogroup_task_get(struct task_struct *p)
{
	struct autogroup *ag;
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return autogroup_kref_get(&autogroup_default);

	ag = autogroup_kref_get(p->signal->autogroup);
	unlock_task_sighand(p, &flags);

	return ag;
}

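/*
 * Allocate a new autogroup backed by its own task_group child of
 * root_task_group; any failure falls back to a reference on the
 * default autogroup.
 */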
static inline struct autogroup *autogroup_create(void)
{
	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
	struct task_group *tg;

	if (!ag)
		goto out_fail;

	tg = sched_create_group(&root_task_group);
	if (IS_ERR(tg))
		goto out_free;

	kref_init(&ag->kref);
	init_rwsem(&ag->lock);
	ag->id = atomic_inc_return(&autogroup_seq_nr);
	ag->tg = tg;
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Autogroup RT tasks are redirected to the root task group
	 * so we don't have to move tasks around upon policy change,
	 * or flail around trying to allocate bandwidth on the fly.
	 * A bandwidth exception in __sched_setscheduler() allows
	 * the policy change to proceed.
	 */
	free_rt_sched_group(tg);
	tg->rt_se = root_task_group.rt_se;
	tg->rt_rq = root_task_group.rt_rq;
#endif
	tg->autogroup = ag;

	sched_online_group(tg, &root_task_group);
	return ag;

out_free:
	kfree(ag);
out_fail:
	if (printk_ratelimit()) {
		printk(KERN_WARNING "autogroup_create: %s failure.\n",
			ag ? "sched_create_group()" : "kzalloc()");
	}

	return autogroup_kref_get(&autogroup_default);
}

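/*
 * Tell the task-movement path whether @p should be placed in its
 * autogroup instead of @tg; only ever true for the root task group.
 */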
bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
	if (tg != &root_task_group)
		return false;
	/*
	 * If we race with autogroup_move_group() the caller can use the old
	 * value of signal->autogroup but in this case sched_move_task() will
	 * be called again before autogroup_kref_put().
	 *
	 * However, there is no way sched_autogroup_exit_task() could tell us
	 * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
	 */
	if (p->flags & PF_EXITING)
		return false;

	return true;
}

void sched_autogroup_exit_task(struct task_struct *p)
{
	/*
	 * We are going to call exit_notify() and autogroup_move_group() can't
	 * see this thread after that: we can no longer use signal->autogroup.
	 * See the PF_EXITING check in task_wants_autogroup().
	 */
	sched_move_task(p);
}

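/*
 * Switch @p's whole signal/thread group over to @ag, taking a
 * reference on the new group and dropping the old one.
 */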
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	struct autogroup *prev;
	struct task_struct *t;
	unsigned long flags;

	BUG_ON(!lock_task_sighand(p, &flags));

	prev = p->signal->autogroup;
	if (prev == ag) {
		unlock_task_sighand(p, &flags);
		return;
	}

	p->signal->autogroup = autogroup_kref_get(ag);
	/*
	 * We can't avoid sched_move_task() after we changed signal->autogroup,
	 * this process can already run with task_group() == prev->tg or we can
	 * race with cgroup code which can read autogroup = prev under rq->lock.
	 * In the latter case for_each_thread() can not miss a migrating thread,
	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
	 * can't be removed from thread list, we hold ->siglock.
	 *
	 * If an exiting thread was already removed from thread list we rely on
	 * sched_autogroup_exit_task().
	 */
	for_each_thread(p, t)
		sched_move_task(t);

	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
}

/* Allocates GFP_KERNEL, cannot be called under any spinlock: */
void sched_autogroup_create_attach(struct task_struct *p)
{
	struct autogroup *ag = autogroup_create();

	autogroup_move_group(p, ag);

	/* Drop extra reference added by autogroup_create(): */
	autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);

/* Cannot be called under siglock. Currently has no users: */
void sched_autogroup_detach(struct task_struct *p)
{
	autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);

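/* At fork time the new signal_struct inherits the parent's autogroup. */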
void sched_autogroup_fork(struct signal_struct *sig)
{
	sig->autogroup = autogroup_task_get(current);
}

void sched_autogroup_exit(struct signal_struct *sig)
{
	autogroup_kref_put(sig->autogroup);
}

static int __init setup_autogroup(char *str)
{
	sysctl_sched_autogroup_enabled = 0;

	return 1;
}
__setup("noautogroup", setup_autogroup);
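/*
 * Booting with "noautogroup" clears the default above; the same knob
 * is exposed at runtime as the kernel.sched_autogroup_enabled sysctl.
 */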

#ifdef CONFIG_PROC_FS

int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
{
	static unsigned long next = INITIAL_JIFFIES;
	struct autogroup *ag;
	unsigned long shares;
	int err, idx;

	if (nice < MIN_NICE || nice > MAX_NICE)
		return -EINVAL;

	err = security_task_setnice(current, nice);
	if (err)
		return err;

	if (nice < 0 && !can_nice(current, nice))
		return -EPERM;

	/* This is a heavy operation, taking global locks.. */
	if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
		return -EAGAIN;

	next = HZ / 10 + jiffies;
	ag = autogroup_task_get(p);

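	/*
	 * nice spans -20..19, so nice + 20 indexes the 40-entry
	 * sched_prio_to_weight[] table (nice 0 maps to weight 1024);
	 * array_index_nospec() clamps the index against speculative
	 * out-of-bounds reads (Spectre v1).
	 */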
	idx = array_index_nospec(nice + 20, 40);
	shares = scale_load(sched_prio_to_weight[idx]);

	down_write(&ag->lock);
	err = sched_group_set_shares(ag->tg, shares);
	if (!err)
		ag->nice = nice;
	up_write(&ag->lock);

	autogroup_kref_put(ag);

	return err;
}

void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
	struct autogroup *ag = autogroup_task_get(p);

	if (!task_group_is_autogroup(ag->tg))
		goto out;

	down_read(&ag->lock);
	seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
	up_read(&ag->lock);

out:
	autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */

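/*
 * Emit "/autogroup-<id>" into @buf; returns 0 (and leaves @buf alone)
 * when @tg is not an autogroup.
 */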
int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
	if (!task_group_is_autogroup(tg))
		return 0;

	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
}
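
The two CONFIG_PROC_FS hooks above back the /proc/<pid>/autogroup file: a read prints the "/autogroup-%ld nice %d" line via proc_sched_autogroup_show_task(), and a write of a nice value is routed to proc_sched_autogroup_set_nice(). A minimal user-space sketch of that round trip (illustrative only, not part of this tree):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/proc/self/autogroup", "r");

	if (!f) {
		perror("/proc/self/autogroup");	/* needs CONFIG_SCHED_AUTOGROUP */
		return EXIT_FAILURE;
	}
	if (fgets(line, sizeof(line), f))
		printf("before: %s", line);	/* e.g. "/autogroup-42 nice 0" */
	fclose(f);

	/*
	 * A written value must lie in MIN_NICE..MAX_NICE (-20..19);
	 * raising priority (nice < 0) needs the usual privileges, and
	 * unprivileged writers can hit -EAGAIN from the HZ/10 rate
	 * limit in proc_sched_autogroup_set_nice().
	 */
	f = fopen("/proc/self/autogroup", "w");
	if (f) {
		fprintf(f, "5\n");
		fclose(f);
	}

	f = fopen("/proc/self/autogroup", "r");
	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("after:  %s", line);
		fclose(f);
	}
	return EXIT_SUCCESS;
}

Since proc_sched_autogroup_set_nice() adjusts the shares of the backing task_group, the new nice value applies to every task in the autogroup (typically one session, as autogroups are created on setsid()), not just to the writing task.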