Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * kernel/sched/debug.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Print the CFS rbtree and other debugging details
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include "sched.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
/*
 * This allows printing both to /proc/sched_debug and
 * to the console: with a valid seq_file the text goes through
 * seq_printf(); with a NULL seq_file it is continued on the current
 * kernel log line via pr_cont().
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  * Ease the printing of nsec fields:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) static long long nsec_high(unsigned long long nsec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 	if ((long long)nsec < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 		nsec = -nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 		do_div(nsec, 1000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 		return -nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) 	do_div(nsec, 1000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 	return nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 
/*
 * Sub-millisecond remainder of a (possibly negative) nsec value,
 * i.e. the fractional digits printed after nsec_high()'s result.
 */
static unsigned long nsec_low(unsigned long long nsec)
{
	unsigned long long abs_nsec = ((long long)nsec < 0) ? -nsec : nsec;

	/* do_div() returns the remainder of the in-place division. */
	return do_div(abs_nsec, 1000000);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 
/* Split a nsec value into the two arguments of a "%lld.%06ld" format. */
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

/* Expand each SCHED_FEAT(name, enabled) entry into its name string. */
#define SCHED_FEAT(name, enabled)	\
	#name ,

/* Feature-name table generated from features.h; index matches the bit
 * position used with sysctl_sched_features. */
const char * const sched_feat_names[] = {
#include "features.h"
};

EXPORT_SYMBOL_GPL(sched_feat_names);
#undef SCHED_FEAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) static int sched_feat_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	for (i = 0; i < __SCHED_FEAT_NR; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 		if (!(sysctl_sched_features & (1UL << i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 			seq_puts(m, "NO_");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 		seq_printf(m, "%s ", sched_feat_names[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	seq_puts(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

/* Expand each SCHED_FEAT(name, enabled) into a static-key initializer. */
#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

/* One static key per feature; toggled by sched_feat_enable/disable(). */
struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};
EXPORT_SYMBOL_GPL(sched_feat_keys);

#undef SCHED_FEAT

/*
 * Toggle the static key for feature i. The _cpuslocked variants are
 * used because the caller (sched_feat_write()) already holds
 * cpus_read_lock().
 */
static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
/* Without jump labels only the sysctl_sched_features bitmap is updated. */
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) static int sched_feat_set(char *cmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	int neg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	if (strncmp(cmp, "NO_", 3) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 		neg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 		cmp += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	if (i < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 		return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	if (neg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 		sysctl_sched_features &= ~(1UL << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 		sched_feat_disable(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 		sysctl_sched_features |= (1UL << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 		sched_feat_enable(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) sched_feat_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 		size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	char buf[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	char *cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	if (cnt > 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 		cnt = 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	if (copy_from_user(&buf, ubuf, cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	buf[cnt] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	cmp = strstrip(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	/* Ensure the static_key remains in a consistent state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	ret = sched_feat_set(cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	*ppos += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
/* debugfs open: route reads through sched_feat_show() via seq_file. */
static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 
/* File operations for the "sched_features" debugfs file. */
static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) __read_mostly bool sched_debug_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) static __init int sched_init_debug(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	debugfs_create_file("sched_features", 0644, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 			&sched_feat_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	debugfs_create_bool("sched_debug", 0644, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 			&sched_debug_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) late_initcall(sched_init_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) #ifdef CONFIG_SYSCTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 
/* /proc/sys/kernel/sched_domain directory; .child filled at runtime. */
static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

/* Root of the registered tree: kernel/ -> sched_domain/ -> cpuN/ ... */
static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) static struct ctl_table *sd_alloc_ctl_entry(int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	struct ctl_table *entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) static void sd_free_ctl_entry(struct ctl_table **tablep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	struct ctl_table *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	 * In the intermediate directories, both the child directory and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	 * procname are dynamically allocated and could fail but the mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	 * will always be set. In the lowest directory the names are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	 * static strings and all have proc handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	for (entry = *tablep; entry->mode; entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		if (entry->child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 			sd_free_ctl_entry(&entry->child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 		if (entry->proc_handler == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 			kfree(entry->procname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	kfree(*tablep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	*tablep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 
/*
 * Fill in one ctl_table entry. Only the five listed fields are set;
 * the entry is expected to come from sd_alloc_ctl_entry() and thus be
 * zeroed otherwise.
 */
static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
/*
 * proc handler for the read-only "flags" file of a sched_domain:
 * renders the set SD_* flag names, space separated, with a trailing
 * newline. Writes are accepted but ignored (the file is 0444 anyway).
 */
static int sd_ctl_doflags(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long flags = *(unsigned long *)table->data;
	size_t data_size = 0;
	size_t len = 0;
	char *tmp, *buf;
	int idx;

	if (write)
		return 0;

	/* First pass: size the output ("name " per set flag). */
	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		/* Name plus whitespace */
		data_size += strlen(name) + 1;
	}

	/* Offset beyond the formatted data: report EOF. */
	if (*ppos > data_size) {
		*lenp = 0;
		return 0;
	}

	/* +1 leaves room for snprintf's terminating NUL. */
	buf = kcalloc(data_size + 1, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Second pass: format "name " for each set flag. */
	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		len += snprintf(buf + len, strlen(name) + 2, "%s ", name);
	}

	/* Serve the slice starting at the caller's offset. */
	tmp = buf + *ppos;
	len -= *ppos;

	if (len > *lenp)
		len = *lenp;
	if (len)
		memcpy(buffer, tmp, len);
	/* Append a newline when there is still room in the caller's buffer. */
	if (len < *lenp) {
		((char *)buffer)[len] = '\n';
		len++;
	}

	*lenp = len;
	*ppos += len;

	kfree(buf);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 
/*
 * Build the per-domain sysctl table (min_interval, max_interval, ...).
 * Returns a 9-entry table (8 files + zero terminator) or NULL on
 * allocation failure. Freed via sd_free_ctl_entry(); the procnames here
 * are string literals, which is why that function only frees procname
 * for entries without a proc handler.
 */
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(9);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval",	  &sd->min_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval",	  &sd->max_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_factor",	  &sd->busy_factor,	    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "imbalance_pct",	  &sd->imbalance_pct,	    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "cache_nice_tries",	  &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "flags",		  &sd->flags,		    sizeof(int),  0444, sd_ctl_doflags);
	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[7], "name",		  sd->name,	       CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[8] is terminator */

	return table;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 
/*
 * Build the sysctl directory tree for one CPU: domain0/, domain1/, ...
 * one subdirectory per sched_domain level. Returns NULL if the entry
 * array cannot be allocated.
 */
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	/* Count domain levels to size the table (plus terminator). */
	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		/*
		 * NOTE(review): kstrdup()/sd_alloc_ctl_domain_table()
		 * failures are not checked here; a NULL procname/child is
		 * tolerated by sd_free_ctl_entry() and the sysctl core.
		 */
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 
/* CPUs whose sched_domain sysctl trees need (re)building. */
static cpumask_var_t		sd_sysctl_cpus;
static struct ctl_table_header	*sd_sysctl_header;

/*
 * (Re)register the kernel/sched_domain/cpuN/ sysctl hierarchy.
 * May be called repeatedly after topology changes; only CPUs flagged
 * dirty in sd_sysctl_cpus get their subtree rebuilt. The cpu_entries
 * array and cpu_idx map are allocated once and kept for the lifetime
 * of the kernel.
 */
void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	/* One-time: allocate the cpuN entry array, hook it under sched_domain/. */
	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	/* One-time: map each possible CPU id to its slot in @cpu_entries. */
	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	/* Rebuild the subtree of every dirty CPU, then clear its flag. */
	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	/* Caller must have unregistered the previous header first. */
	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) void dirty_sched_domain_sysctl(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	if (cpumask_available(sd_sysctl_cpus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 
/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	/* unregister_sysctl_table(NULL) is a no-op, so repeats are safe. */
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) #endif /* CONFIG_SYSCTL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Dump the per-CPU sched_entity of a task group: timestamps, schedstat
 * counters (when enabled) and load-tracking averages. The P*/PN* macros
 * print one ".field: value" line each; the *N variants split nsec
 * values into a ms.frac form via SPLIT_NS().
 */
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	/* The root task group has no per-CPU entities. */
	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) #ifdef CONFIG_CGROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) static DEFINE_SPINLOCK(sched_debug_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) static char group_path[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) static void task_group_path(struct task_group *tg, char *path, int plen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	if (autogroup_path(tg, path, plen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	cgroup_path(tg->css.cgroup, path, plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485)  * Only 1 SEQ_printf_task_group_path() caller can use the full length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486)  * group_path[] for cgroup path. Other simultaneous callers will have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487)  * to use a shorter stack buffer. A "..." suffix is appended at the end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488)  * of the stack buffer so that it will show up in case the output length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489)  * matches the given buffer size to indicate possible path name truncation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) #define SEQ_printf_task_group_path(m, tg, fmt...)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	if (spin_trylock(&sched_debug_lock)) {				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		task_group_path(tg, group_path, sizeof(group_path));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		SEQ_printf(m, fmt, group_path);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		spin_unlock(&sched_debug_lock);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	} else {							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		char buf[128];						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		char *bufend = buf + sizeof(buf) - 3;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		task_group_path(tg, buf, bufend - buf);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		strcpy(bufend - 1, "...");				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		SEQ_printf(m, fmt, buf);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	}								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	if (rq->curr == p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		SEQ_printf(m, ">R");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 		SEQ_printf(m, " %c", task_state_to_char(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 		p->comm, task_pid_nr(p),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		SPLIT_NS(p->se.vruntime),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		(long long)(p->nvcsw + p->nivcsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		p->prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		SPLIT_NS(p->se.sum_exec_runtime),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) #ifdef CONFIG_NUMA_BALANCING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) #ifdef CONFIG_CGROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	SEQ_printf_task_group_path(m, task_group(p), " %s")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	SEQ_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	struct task_struct *g, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	SEQ_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	SEQ_printf(m, "runnable tasks:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		   "     wait-time             sum-exec        sum-sleep\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	SEQ_printf(m, "-------------------------------------------------------"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		   "------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	for_each_process_thread(g, p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		if (task_cpu(p) != rq_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		print_task(m, rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		spread, rq0_min_vruntime, spread0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	struct rq *rq = cpu_rq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	struct sched_entity *last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) #ifdef CONFIG_FAIR_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	SEQ_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	SEQ_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 			SPLIT_NS(cfs_rq->exec_clock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	raw_spin_lock_irqsave(&rq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	if (rb_first_cached(&cfs_rq->tasks_timeline))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	last = __pick_last_entity(cfs_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	if (last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		max_vruntime = last->vruntime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	min_vruntime = cfs_rq->min_vruntime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	raw_spin_unlock_irqrestore(&rq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 			SPLIT_NS(MIN_vruntime));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 			SPLIT_NS(min_vruntime));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 			SPLIT_NS(max_vruntime));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	spread = max_vruntime - MIN_vruntime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 			SPLIT_NS(spread));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	spread0 = min_vruntime - rq0_min_vruntime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 			SPLIT_NS(spread0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 			cfs_rq->nr_spread_over);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 			cfs_rq->avg.load_avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 			cfs_rq->avg.runnable_avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 			cfs_rq->avg.util_avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 			cfs_rq->avg.util_est.enqueued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 			cfs_rq->removed.load_avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 			cfs_rq->removed.util_avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 			cfs_rq->removed.runnable_avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) #ifdef CONFIG_FAIR_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 			cfs_rq->tg_load_avg_contrib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 			atomic_long_read(&cfs_rq->tg->load_avg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) #ifdef CONFIG_CFS_BANDWIDTH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 			cfs_rq->throttled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 			cfs_rq->throttle_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) #ifdef CONFIG_FAIR_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) #ifdef CONFIG_RT_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	SEQ_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	SEQ_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) #define P(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) #define PU(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) #define PN(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	PU(rt_nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	PU(rt_nr_migratory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	P(rt_throttled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	PN(rt_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	PN(rt_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) #undef PN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) #undef PU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) #undef P
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	struct dl_bw *dl_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	SEQ_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	SEQ_printf(m, "dl_rq[%d]:\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) #define PU(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	PU(dl_nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	PU(dl_nr_migratory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	dl_bw = &dl_rq->dl_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) #undef PU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) static void print_cpu(struct seq_file *m, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	struct rq *rq = cpu_rq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		unsigned int freq = cpu_khz ? : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 			   cpu, freq / 1000, (freq % 1000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	SEQ_printf(m, "cpu#%d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) #define P(x)								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) do {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	if (sizeof(rq->x) == 4)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	else								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) #define PN(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	P(nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	P(nr_switches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	P(nr_uninterruptible);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	PN(next_balance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	PN(clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	PN(clock_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) #undef P
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) #undef PN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) #define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	P64(avg_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	P64(max_idle_balance_cost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) #undef P64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	if (schedstat_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		P(yld_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		P(sched_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		P(sched_goidle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		P(ttwu_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		P(ttwu_local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) #undef P
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	print_cfs_stats(m, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	print_rt_stats(m, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	print_dl_stats(m, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	print_rq(m, rq, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	SEQ_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static const char *sched_tunable_scaling_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	"none",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	"logarithmic",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	"linear"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) static void sched_debug_header(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	u64 ktime, sched_clk, cpu_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	ktime = ktime_to_ns(ktime_get());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	sched_clk = sched_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	cpu_clk = local_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		init_utsname()->release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		(int)strcspn(init_utsname()->version, " "),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		init_utsname()->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) #define P(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) #define PN(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	PN(ktime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	PN(sched_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	PN(cpu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	P(jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	P(sched_clock_stable());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) #undef PN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) #undef P
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	SEQ_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	SEQ_printf(m, "sysctl_sched\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) #define P(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) #define PN(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	PN(sysctl_sched_latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	PN(sysctl_sched_min_granularity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	PN(sysctl_sched_wakeup_granularity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	P(sysctl_sched_child_runs_first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	P(sysctl_sched_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) #undef PN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) #undef P
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	SEQ_printf(m, "  .%-40s: %d (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		"sysctl_sched_tunable_scaling",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		sysctl_sched_tunable_scaling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	SEQ_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) static int sched_debug_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	int cpu = (unsigned long)(v - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	if (cpu != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		print_cpu(m, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		sched_debug_header(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) void sysrq_sched_debug_show(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	sched_debug_header(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		 * Need to reset softlockup watchdogs on all CPUs, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		 * another CPU might be blocked waiting for us to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		 * an IPI or stop_machine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		touch_nmi_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		touch_all_softlockup_watchdogs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		print_cpu(NULL, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * This itererator needs some explanation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * It returns 1 for the header position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  * This means 2 is CPU 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  * to use cpumask_* to iterate over the CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) static void *sched_debug_start(struct seq_file *file, loff_t *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	unsigned long n = *offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (n == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		return (void *) 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	n--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (n > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		n = cpumask_next(n - 1, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		n = cpumask_first(cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	*offset = n + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (n < nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		return (void *)(unsigned long)(n + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	(*offset)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	return sched_debug_start(file, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) static void sched_debug_stop(struct seq_file *file, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) static const struct seq_operations sched_debug_sops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	.start		= sched_debug_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	.next		= sched_debug_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	.stop		= sched_debug_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	.show		= sched_debug_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) static int __init init_sched_debug_procfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) __initcall(init_sched_debug_procfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) #define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) #define __P(F) __PS(#F, F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) #define   P(F) __PS(#F, p->F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) #define   PM(F, M) __PS(#F, p->F & (M))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) #define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) #define __PN(F) __PSN(#F, F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) #define   PN(F) __PSN(#F, p->F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) #ifdef CONFIG_NUMA_BALANCING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		unsigned long tpf, unsigned long gsf, unsigned long gpf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	SEQ_printf(m, "numa_faults node=%d ", node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) static void sched_show_numa(struct task_struct *p, struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) #ifdef CONFIG_NUMA_BALANCING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	if (p->mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		P(mm->numa_scan_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	P(numa_pages_migrated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	P(numa_preferred_nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	P(total_numa_faults);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			task_node(p), task_numa_group_id(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	show_numa_stats(p, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 						  struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	unsigned long nr_switches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 						get_nr_threads(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	SEQ_printf(m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		"---------------------------------------------------------"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		"----------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) #define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) #define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	PN(se.exec_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	PN(se.vruntime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	PN(se.sum_exec_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	nr_switches = p->nvcsw + p->nivcsw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	P(se.nr_migrations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (schedstat_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		u64 avg_atom, avg_per_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		PN_SCHEDSTAT(se.statistics.wait_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		PN_SCHEDSTAT(se.statistics.sleep_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		PN_SCHEDSTAT(se.statistics.block_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		PN_SCHEDSTAT(se.statistics.sleep_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		PN_SCHEDSTAT(se.statistics.block_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		PN_SCHEDSTAT(se.statistics.exec_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		PN_SCHEDSTAT(se.statistics.slice_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		PN_SCHEDSTAT(se.statistics.wait_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		PN_SCHEDSTAT(se.statistics.wait_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		P_SCHEDSTAT(se.statistics.wait_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		PN_SCHEDSTAT(se.statistics.iowait_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		P_SCHEDSTAT(se.statistics.iowait_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		P_SCHEDSTAT(se.statistics.nr_wakeups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		avg_atom = p->se.sum_exec_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		if (nr_switches)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			avg_atom = div64_ul(avg_atom, nr_switches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			avg_atom = -1LL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		avg_per_cpu = p->se.sum_exec_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		if (p->se.nr_migrations) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			avg_per_cpu = div64_u64(avg_per_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 						p->se.nr_migrations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			avg_per_cpu = -1LL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		__PN(avg_atom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		__PN(avg_per_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	__P(nr_switches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	__PS("nr_voluntary_switches", p->nvcsw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	__PS("nr_involuntary_switches", p->nivcsw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	P(se.load.weight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	P(se.avg.load_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	P(se.avg.runnable_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	P(se.avg.util_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	P(se.avg.load_avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	P(se.avg.runnable_avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	P(se.avg.util_avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	P(se.avg.last_update_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	P(se.avg.util_est.ewma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) #ifdef CONFIG_UCLAMP_TASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	P(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	P(prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	if (task_has_dl_policy(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		P(dl.runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		P(dl.deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) #undef PN_SCHEDSTAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) #undef P_SCHEDSTAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		unsigned int this_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		u64 t0, t1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		t0 = cpu_clock(this_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		t1 = cpu_clock(this_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		__PS("clock-delta", t1-t0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	sched_show_numa(p, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) void proc_sched_set_task(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) #ifdef CONFIG_SCHEDSTATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }