^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) //SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #include <linux/cgroup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include "cgroup-internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <trace/events/cgroup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
/*
 * Propagate the cgroup frozen state upwards by the cgroup tree.
 *
 * Must be called with css_set_lock held (all callers hold it when
 * calling via cgroup_update_frozen()).
 */
static void cgroup_propagate_frozen(struct cgroup *cgrp, bool frozen)
{
	/*
	 * Number of descendants whose frozen state changed so far on this
	 * walk: the originating cgroup itself, plus every ancestor flipped
	 * along the way. Each ancestor's nr_frozen_descendants must be
	 * adjusted by this full amount, since all of the flipped cgroups
	 * are its descendants too.
	 */
	int desc = 1;

	/*
	 * If the new state is frozen, some freezing ancestor cgroups may change
	 * their state too, depending on if all their descendants are frozen.
	 *
	 * Otherwise, all ancestor cgroups are forced into the non-frozen state.
	 */
	while ((cgrp = cgroup_parent(cgrp))) {
		if (frozen) {
			cgrp->freezer.nr_frozen_descendants += desc;
			/*
			 * An ancestor becomes frozen only if freezing was
			 * requested (CGRP_FREEZE) and every single descendant
			 * is now frozen.
			 */
			if (!test_bit(CGRP_FROZEN, &cgrp->flags) &&
			    test_bit(CGRP_FREEZE, &cgrp->flags) &&
			    cgrp->freezer.nr_frozen_descendants ==
			    cgrp->nr_descendants) {
				set_bit(CGRP_FROZEN, &cgrp->flags);
				cgroup_file_notify(&cgrp->events_file);
				TRACE_CGROUP_PATH(notify_frozen, cgrp, 1);
				/* This cgroup flipped too: widen the delta. */
				desc++;
			}
		} else {
			cgrp->freezer.nr_frozen_descendants -= desc;
			/* Any unfrozen descendant unfreezes the ancestor. */
			if (test_bit(CGRP_FROZEN, &cgrp->flags)) {
				clear_bit(CGRP_FROZEN, &cgrp->flags);
				cgroup_file_notify(&cgrp->events_file);
				TRACE_CGROUP_PATH(notify_frozen, cgrp, 0);
				/* This cgroup flipped too: widen the delta. */
				desc++;
			}
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * Revisit the cgroup frozen state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * Checks if the cgroup is really frozen and perform all state transitions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) void cgroup_update_frozen(struct cgroup *cgrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) bool frozen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) lockdep_assert_held(&css_set_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * If the cgroup has to be frozen (CGRP_FREEZE bit set),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * and all tasks are frozen and/or stopped, let's consider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * the cgroup frozen. Otherwise it's not frozen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) frozen = test_bit(CGRP_FREEZE, &cgrp->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) cgrp->freezer.nr_frozen_tasks == __cgroup_task_count(cgrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) if (frozen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /* Already there? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) if (test_bit(CGRP_FROZEN, &cgrp->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) set_bit(CGRP_FROZEN, &cgrp->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) /* Already there? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) if (!test_bit(CGRP_FROZEN, &cgrp->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) clear_bit(CGRP_FROZEN, &cgrp->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) cgroup_file_notify(&cgrp->events_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) TRACE_CGROUP_PATH(notify_frozen, cgrp, frozen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) /* Update the state of ancestor cgroups. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) cgroup_propagate_frozen(cgrp, frozen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * Increment cgroup's nr_frozen_tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) static void cgroup_inc_frozen_cnt(struct cgroup *cgrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) cgrp->freezer.nr_frozen_tasks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * Decrement cgroup's nr_frozen_tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) static void cgroup_dec_frozen_cnt(struct cgroup *cgrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) cgrp->freezer.nr_frozen_tasks--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) WARN_ON_ONCE(cgrp->freezer.nr_frozen_tasks < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * Enter frozen/stopped state, if not yet there. Update cgroup's counters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * and revisit the state of the cgroup, if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) void cgroup_enter_frozen(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) struct cgroup *cgrp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) if (current->frozen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) spin_lock_irq(&css_set_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) current->frozen = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) cgrp = task_dfl_cgroup(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) cgroup_inc_frozen_cnt(cgrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) cgroup_update_frozen(cgrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) spin_unlock_irq(&css_set_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * Conditionally leave frozen/stopped state. Update cgroup's counters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * and revisit the state of the cgroup, if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * If always_leave is not set, and the cgroup is freezing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * we're racing with the cgroup freezing. In this case, we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * drop the frozen counter to avoid a transient switch to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * the unfrozen state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) void cgroup_leave_frozen(bool always_leave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) struct cgroup *cgrp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) spin_lock_irq(&css_set_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) cgrp = task_dfl_cgroup(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) if (always_leave || !test_bit(CGRP_FREEZE, &cgrp->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) cgroup_dec_frozen_cnt(cgrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) cgroup_update_frozen(cgrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) WARN_ON_ONCE(!current->frozen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) current->frozen = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) } else if (!(current->jobctl & JOBCTL_TRAP_FREEZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) spin_lock(¤t->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) current->jobctl |= JOBCTL_TRAP_FREEZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) set_thread_flag(TIF_SIGPENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) spin_unlock(¤t->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) spin_unlock_irq(&css_set_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * Freeze or unfreeze the task by setting or clearing the JOBCTL_TRAP_FREEZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * jobctl bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) static void cgroup_freeze_task(struct task_struct *task, bool freeze)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) /* If the task is about to die, don't bother with freezing it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) if (!lock_task_sighand(task, &flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) if (freeze) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) task->jobctl |= JOBCTL_TRAP_FREEZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) signal_wake_up(task, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) task->jobctl &= ~JOBCTL_TRAP_FREEZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) wake_up_process(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) unlock_task_sighand(task, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * Freeze or unfreeze all tasks in the given cgroup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static void cgroup_do_freeze(struct cgroup *cgrp, bool freeze)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) struct css_task_iter it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) lockdep_assert_held(&cgroup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) spin_lock_irq(&css_set_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) if (freeze)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) set_bit(CGRP_FREEZE, &cgrp->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) clear_bit(CGRP_FREEZE, &cgrp->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) spin_unlock_irq(&css_set_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) if (freeze)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) TRACE_CGROUP_PATH(freeze, cgrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) TRACE_CGROUP_PATH(unfreeze, cgrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) css_task_iter_start(&cgrp->self, 0, &it);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) while ((task = css_task_iter_next(&it))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * Ignore kernel threads here. Freezing cgroups containing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * kthreads isn't supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) if (task->flags & PF_KTHREAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) cgroup_freeze_task(task, freeze);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) css_task_iter_end(&it);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * Cgroup state should be revisited here to cover empty leaf cgroups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * and cgroups which descendants are already in the desired state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) spin_lock_irq(&css_set_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) if (cgrp->nr_descendants == cgrp->freezer.nr_frozen_descendants)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) cgroup_update_frozen(cgrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) spin_unlock_irq(&css_set_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * Adjust the task state (freeze or unfreeze) and revisit the state of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * source and destination cgroups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) void cgroup_freezer_migrate_task(struct task_struct *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) struct cgroup *src, struct cgroup *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) lockdep_assert_held(&css_set_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * Kernel threads are not supposed to be frozen at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) if (task->flags & PF_KTHREAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * It's not necessary to do changes if both of the src and dst cgroups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * are not freezing and task is not frozen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) if (!test_bit(CGRP_FREEZE, &src->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) !test_bit(CGRP_FREEZE, &dst->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) !task->frozen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * Adjust counters of freezing and frozen tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * Note, that if the task is frozen, but the destination cgroup is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) * frozen, we bump both counters to keep them balanced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) if (task->frozen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) cgroup_inc_frozen_cnt(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) cgroup_dec_frozen_cnt(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) cgroup_update_frozen(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) cgroup_update_frozen(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * Force the task to the desired state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) cgroup_freeze_task(task, test_bit(CGRP_FREEZE, &dst->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
/*
 * Set the desired freezer state of @cgrp and apply it to the whole subtree.
 *
 * e_freeze is the effective-freeze counter: how many self-or-ancestor
 * cgroups requested freezing. A cgroup's actual state only changes when
 * the counter crosses the 0 <-> 1 boundary; otherwise an ancestor's
 * setting already enforces the state and the change is a no-op.
 *
 * Must be called with cgroup_mutex held.
 */
void cgroup_freeze(struct cgroup *cgrp, bool freeze)
{
	struct cgroup_subsys_state *css;
	struct cgroup *dsct;
	bool applied = false;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Nothing changed? Just exit.
	 */
	if (cgrp->freezer.freeze == freeze)
		return;

	cgrp->freezer.freeze = freeze;

	/*
	 * Propagate changes downwards the cgroup tree.
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		dsct = css->cgroup;

		if (cgroup_is_dead(dsct))
			continue;

		if (freeze) {
			dsct->freezer.e_freeze++;
			/*
			 * Already frozen because of ancestor's settings?
			 */
			if (dsct->freezer.e_freeze > 1)
				continue;
		} else {
			dsct->freezer.e_freeze--;
			/*
			 * Still frozen because of ancestor's settings?
			 */
			if (dsct->freezer.e_freeze > 0)
				continue;

			/* Going negative means unbalanced accounting. */
			WARN_ON_ONCE(dsct->freezer.e_freeze < 0);
		}

		/*
		 * Do change actual state: freeze or unfreeze.
		 */
		cgroup_do_freeze(dsct, freeze);
		applied = true;
	}

	/*
	 * Even if the actual state hasn't changed, let's notify a user.
	 * The state can be enforced by an ancestor cgroup: the cgroup
	 * can already be in the desired state or it can be locked in the
	 * opposite state, so that the transition will never happen.
	 * In both cases it's better to notify a user, that there is
	 * nothing to wait for.
	 */
	if (!applied) {
		TRACE_CGROUP_PATH(notify_frozen, cgrp,
				  test_bit(CGRP_FROZEN, &cgrp->flags));
		cgroup_file_notify(&cgrp->events_file);
	}
}