^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* drivers/cpufreq/cpufreq_times.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2018 Google, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * This software is licensed under the terms of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * License version 2, as published by the Free Software Foundation, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * may be copied, distributed, and modified under those terms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This program is distributed in the hope that it will be useful,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * but WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * GNU General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/cpufreq_times.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/jiffies.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/threads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <trace/hooks/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
/**
 * struct cpu_freqs - per-cpu frequency information
 * @offset: start of these freqs' stats in task time_in_state array
 * @max_state: number of entries in freq_table
 * @last_index: index in freq_table of last frequency switched to
 * @freq_table: list of available frequencies
 */
struct cpu_freqs {
	unsigned int offset;
	unsigned int max_state;
	unsigned int last_index;
	/* C99 flexible array member; deprecated freq_table[0] form replaced */
	unsigned int freq_table[];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
/* Per-possible-CPU pointer to its policy's shared cpu_freqs entry */
static struct cpu_freqs *all_freqs[NR_CPUS];

/* Next unused slot in a task's flat time_in_state array; grows as policies are created */
static unsigned int next_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) void cpufreq_task_times_init(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) spin_lock_irqsave(&task_time_in_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) p->time_in_state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) spin_unlock_irqrestore(&task_time_in_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) p->max_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) void cpufreq_task_times_alloc(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) void *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) unsigned int max_state = READ_ONCE(next_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) /* We use one array to avoid multiple allocs per task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) if (!temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) spin_lock_irqsave(&task_time_in_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) p->time_in_state = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) spin_unlock_irqrestore(&task_time_in_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) p->max_state = max_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) /* Caller must hold task_time_in_state_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) static int cpufreq_task_times_realloc_locked(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) void *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) unsigned int max_state = READ_ONCE(next_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) if (!temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) p->time_in_state = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) memset(p->time_in_state + p->max_state, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) (max_state - p->max_state) * sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) p->max_state = max_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) void cpufreq_task_times_exit(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) void *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) if (!p->time_in_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) spin_lock_irqsave(&task_time_in_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) temp = p->time_in_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) p->time_in_state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) spin_unlock_irqrestore(&task_time_in_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) kfree(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) struct pid *pid, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) unsigned int cpu, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) u64 cputime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) struct cpu_freqs *freqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) struct cpu_freqs *last_freqs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) spin_lock_irqsave(&task_time_in_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) freqs = all_freqs[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) if (!freqs || freqs == last_freqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) last_freqs = freqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) seq_printf(m, "cpu%u\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) for (i = 0; i < freqs->max_state; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) cputime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) if (freqs->offset + i < p->max_state &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) p->time_in_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) cputime = p->time_in_state[freqs->offset + i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) seq_printf(m, "%u %lu\n", freqs->freq_table[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) (unsigned long)nsec_to_clock_t(cputime));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) spin_unlock_irqrestore(&task_time_in_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) unsigned int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) state = freqs->offset + READ_ONCE(freqs->last_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) spin_lock_irqsave(&task_time_in_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) p->time_in_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) p->time_in_state[state] += cputime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) spin_unlock_irqrestore(&task_time_in_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) trace_android_vh_cpufreq_acct_update_power(cputime, p, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) static int cpufreq_times_get_index(struct cpu_freqs *freqs, unsigned int freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) for (index = 0; index < freqs->max_state; ++index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) if (freqs->freq_table[index] == freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) return index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) void cpufreq_times_create_policy(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) int cpu, index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) unsigned int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) struct cpufreq_frequency_table *pos, *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) struct cpu_freqs *freqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) void *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) if (all_freqs[policy->cpu])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) table = policy->freq_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) if (!table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) cpufreq_for_each_valid_entry(pos, table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) freqs = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) freqs->max_state = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) cpufreq_for_each_valid_entry(pos, table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) freqs->freq_table[index++] = pos->frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) index = cpufreq_times_get_index(freqs, policy->cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) if (index >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) WRITE_ONCE(freqs->last_index, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) freqs->offset = next_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) WRITE_ONCE(next_offset, freqs->offset + count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) for_each_cpu(cpu, policy->related_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) all_freqs[cpu] = freqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) void cpufreq_times_record_transition(struct cpufreq_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) unsigned int new_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) struct cpu_freqs *freqs = all_freqs[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) if (!freqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) index = cpufreq_times_get_index(freqs, new_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) if (index >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) WRITE_ONCE(freqs->last_index, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }