Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>
#include <linux/sched/cputime.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family;

static const struct nla_policy taskstats_cmd_get_policy[] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};

static const struct nla_policy cgroupstats_cmd_get_policy[] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

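/*
 * Allocate a genetlink message of @size and start a reply of type @cmd in it:
 * as an answer to the request in @info, or, when @info is NULL (the exit
 * path), as a standalone message using the per-cpu sequence counter.
 */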
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);

	genlmsg_end(skb, reply);

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	genlmsg_end(skb, reply);

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

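/*
 * Fill @stats for a single task: delay accounting plus the basic (bacct)
 * and extended (xacct) accounting fields.
 */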
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

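/* Look up a live task by pid in the caller's namespace and fill @stats for it */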
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	tsk = find_get_task_by_vpid(pid);
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}

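/*
 * Fill @stats for a whole thread group: start from the exit stats already
 * accumulated in signal->stats (if any) and add the current values of all
 * live threads.
 */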
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;
	u64 delta, utime, stime;
	u64 start_time;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	start_time = ktime_get_ns();
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		/* calculate task elapsed time in nsec */
		delta = start_time - tsk->start_time;
		/* Convert to microseconds */
		do_div(delta, NSEC_PER_USEC);
		stats->ac_etime += delta;

		task_cputime(tsk, &utime, &stime);
		stats->ac_utime += div_u64(utime, NSEC_PER_USEC);
		stats->ac_stime += div_u64(stime, NSEC_PER_USEC);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

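/*
 * Fold an exiting task's delay accounting into its thread group's
 * signal->stats, if the per-tgid structure has been allocated.
 */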
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

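/*
 * Register or deregister netlink port @pid as an exit-data listener on every
 * CPU in @mask. Only tasks in the initial user and pid namespaces may
 * (de)register.
 */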
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;
	int ret = 0;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s) {
				ret = -ENOMEM;
				goto cleanup;
			}
			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return ret;
}

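/*
 * Copy the cpulist string out of netlink attribute @na and parse it into
 * @mask. Returns 1 if the attribute is absent, 0 on success, negative errno
 * on error.
 */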
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

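/*
 * Start an AGGR_PID/AGGR_TGID nest in @skb, store @pid and reserve room for
 * the taskstats payload. Returns a pointer to the reserved struct taskstats,
 * or NULL if the skb ran out of space.
 */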
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	na = nla_nest_start_noflag(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
				sizeof(struct taskstats), TASKSTATS_TYPE_NULL);
	if (!ret) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

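/*
 * CGROUPSTATS_CMD_GET handler: build cgroupstats for the cgroup directory
 * referenced by the CGROUPSTATS_CMD_ATTR_FD file descriptor and send the
 * reply back to the requester.
 */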
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_path.dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}

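/*
 * Handle TASKSTATS_CMD_ATTR_REGISTER_CPUMASK: parse the cpumask supplied by
 * the sender and register it as an exit-data listener on those CPUs. The
 * DEREGISTER variant below does the inverse.
 */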
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

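/*
 * Worst-case payload for one PID/TGID answer: the nest header, the u32
 * pid/tgid attribute and the 64-bit aligned taskstats blob.
 */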
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(sizeof(struct taskstats)) +
		nla_total_size(0);

	return size;
}

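/*
 * TASKSTATS_CMD_GET for a single pid: build the reply skb, fill in the stats
 * of the requested task and send it back. cmd_attr_tgid() below is the
 * thread-group equivalent.
 */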
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

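/* TASKSTATS_CMD_GET dispatcher: branch on whichever command attribute is present */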
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

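/*
 * Lazily allocate the per-thread-group taskstats structure. The
 * smp_load_acquire()/smp_store_release() pair lets lock-free readers see a
 * fully initialised structure; allocation races are resolved under siglock.
 */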
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats_new, *stats;

	/* Pairs with smp_store_release() below. */
	stats = smp_load_acquire(&sig->stats);
	if (stats || thread_group_empty(tsk))
		return stats;

	/* No problem if kmem_cache_zalloc() fails */
	stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	stats = sig->stats;
	if (!stats) {
		/*
		 * Pairs with smp_store_release() above and order the
		 * kmem_cache_zalloc().
		 */
		smp_store_release(&sig->stats, stats_new);
		stats = stats_new;
		stats_new = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats_new)
		kmem_cache_free(taskstats_cache, stats_new);

	return stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = raw_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static const struct genl_ops taskstats_ops[] = {
	{
		.cmd		= TASKSTATS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= taskstats_user_cmd,
		.policy		= taskstats_cmd_get_policy,
		.maxattr	= ARRAY_SIZE(taskstats_cmd_get_policy) - 1,
		.flags		= GENL_ADMIN_PERM,
	},
	{
		.cmd		= CGROUPSTATS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= cgroupstats_user_cmd,
		.policy		= cgroupstats_cmd_get_policy,
		.maxattr	= ARRAY_SIZE(cgroupstats_cmd_get_policy) - 1,
	},
};

static struct genl_family family __ro_after_init = {
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.module		= THIS_MODULE,
	.ops		= taskstats_ops,
	.n_ops		= ARRAY_SIZE(taskstats_ops),
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);
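
The generic netlink interface implemented above can be exercised from userspace. Below is a minimal, hypothetical sketch using libnl-3; it is not part of this tree, the file name query_taskstats.c is illustrative only, and error handling is deliberately omitted. It assumes the libnl-genl-3 development package is installed and must run with CAP_NET_ADMIN, since TASKSTATS_CMD_GET is marked GENL_ADMIN_PERM above.

/* query_taskstats.c - hypothetical userspace example, not part of the kernel.
 * Ask the TASKSTATS genetlink family for the stats of one pid and print a
 * few fields. Error handling is omitted to keep the sketch short. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/taskstats.h>

/* Called for the TASKSTATS_CMD_NEW reply built by mk_reply()/fill_stats_for_pid() */
static int print_stats(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[TASKSTATS_TYPE_MAX + 1];
	struct nlattr *nested[TASKSTATS_TYPE_MAX + 1];
	struct taskstats *ts;

	genlmsg_parse(nlmsg_hdr(msg), 0, attrs, TASKSTATS_TYPE_MAX, NULL);
	if (!attrs[TASKSTATS_TYPE_AGGR_PID])
		return NL_SKIP;
	nla_parse_nested(nested, TASKSTATS_TYPE_MAX,
			 attrs[TASKSTATS_TYPE_AGGR_PID], NULL);
	if (!nested[TASKSTATS_TYPE_STATS])
		return NL_SKIP;
	ts = nla_data(nested[TASKSTATS_TYPE_STATS]);
	printf("version=%u ac_utime=%llu us ac_stime=%llu us\n",
	       ts->version,
	       (unsigned long long)ts->ac_utime,
	       (unsigned long long)ts->ac_stime);
	return NL_OK;
}

int main(int argc, char **argv)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	pid_t pid = argc > 1 ? atoi(argv[1]) : getpid();
	int family_id;

	genl_connect(sk);
	family_id = genl_ctrl_resolve(sk, TASKSTATS_GENL_NAME);

	/* TASKSTATS_CMD_GET carrying a TASKSTATS_CMD_ATTR_PID attribute */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family_id, 0, 0,
		    TASKSTATS_CMD_GET, TASKSTATS_GENL_VERSION);
	nla_put_u32(msg, TASKSTATS_CMD_ATTR_PID, pid);
	nl_send_auto(sk, msg);
	nlmsg_free(msg);

	/* The kernel answers with a single TASKSTATS_CMD_NEW message */
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, print_stats, NULL);
	nl_recvmsgs_default(sk);

	nl_socket_free(sk);
	return 0;
}

A build command along the lines of cc query_taskstats.c $(pkg-config --cflags --libs libnl-genl-3.0) should be enough; run the resulting binary as root with the pid of interest as its argument.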