Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */
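
/* Every record added to the event buffer below follows the same
 * framing: an ESCAPE_CODE entry, a code word identifying the record
 * type, then the payload words. Plain samples are the exception and
 * are written as bare (offset, event) pairs; see the add_*() helpers
 * further down. */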

#include <linux/file.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/gfp.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;
	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);
	return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact; it's a
 * quality-of-implementation (QoI) issue only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	mmap_read_lock(mm);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		mmap_read_unlock(mm);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	mmap_read_unlock(mm);
	return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return NOTIFY_OK;
}
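
/* Note that only a bare "module loaded" marker is emitted; no module
 * name or address range goes into the buffer. The userspace daemon is
 * then expected to refresh its own view of the loaded modules (e.g.
 * from /proc/modules); that behaviour lives in the daemon, not in this
 * file. */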


static struct notifier_block task_free_nb = {
	.notifier_call	= task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call	= task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call	= munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};

static void free_all_tasks(void)
{
	/* make sure we don't leak task structs: the first pass frees
	 * dead_tasks and moves dying_tasks onto it, the second pass
	 * then frees those as well */
	process_task_mortuary();
	process_task_mortuary();
}

int sync_start(void)
{
	int err;

	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
		return -ENOMEM;

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

	start_cpu_work();

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
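	/* task_free_notify() may already have queued tasks on
	 * dying_tasks by this point, so reap them before bailing out */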
	free_all_tasks();
out1:
	free_cpumask_var(marked_cpus);
	goto out;
}


void sync_stop(void)
{
	end_cpu_work();
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	barrier();			/* do all of the above first */

	flush_cpu_work();

	free_all_tasks();
	free_cpumask_var(marked_cpus);
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(const struct path *path)
{
	unsigned long cookie;

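	/* If DCACHE_COOKIE is set, a dcookie has already been allocated
	 * for this dentry and its value is just the dentry's address, so
	 * the full lookup in fs/dcookies.c can be skipped. */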
	if (path->dentry->d_flags & DCACHE_COOKIE)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}


/* Look up the dcookie for the task's mm->exe_file,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct file *exe_file;

	if (!mm)
		goto done;

	exe_file = get_mm_exe_file(mm);
	if (!exe_file)
		goto done;

	cookie = fast_get_dcookie(&exe_file->f_path);
	fput(exe_file);
done:
	return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 *
 * The caller must ensure the mm is not NULL (i.e. not a kernel thread).
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;
	mmap_read_unlock(mm);

	return cookie;
}
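
/* Three outcomes are possible above: a real dcookie for a file-backed
 * mapping, NO_COOKIE for an anonymous mapping (the raw virtual address
 * is used as the offset), or INVALID_COOKIE when no VMA covers the
 * address and the sample cannot be attributed at all. */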

static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}


static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
	unsigned long code, pc, val;
	unsigned long cookie;
	off_t offset;

	if (!op_cpu_buffer_get_data(entry, &code))
		return;
	if (!op_cpu_buffer_get_data(entry, &pc))
		return;
	if (!op_cpu_buffer_get_size(entry))
		return;

	if (mm) {
		cookie = lookup_dcookie(mm, pc, &offset);

		if (cookie == NO_COOKIE)
			offset = pc;
		if (cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = pc;
		}
		if (cookie != last_cookie) {
			add_cookie_switch(cookie);
			last_cookie = cookie;
		}
	} else
		offset = pc;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* Offset from Dcookie */

	while (op_cpu_buffer_get_data(entry, &val))
		add_event_entry(val);
}

static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}


/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	unsigned long cookie;
	off_t offset;

	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	}

	/* add userspace sample */

	if (!mm) {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
		return 0;
	}

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}


static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	mmput(mm);
}

static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}


static void mark_done(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpumask_test_cpu(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpumask_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we would need a fifth state and some special
 * handling in sync_buffer().
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;
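
/* The order of the enum values matters: sync_buffer() drops samples
 * while the state is below sb_bt_start, i.e. in sb_buffer_start before
 * the first context-switch note, or in sb_bt_ignore after a backtrace
 * whose lead sample could not be mapped. */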

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_lock and doing
 * a lookup in task->mm->mmap to convert the EIP into a
 * dcookie/offset value.
 */
void sync_buffer(int cpu)
{
	struct mm_struct *mm = NULL;
	struct mm_struct *oldmm;
	unsigned long val;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	sync_buffer_state state = sb_buffer_start;
	unsigned int i;
	unsigned long available;
	unsigned long flags;
	struct op_entry entry;
	struct op_sample *sample;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	op_cpu_buffer_reset(cpu);
	available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; ++i) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;

		if (is_code(sample->eip)) {
			flags = sample->event;
			if (flags & TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			}
			if (flags & KERNEL_CTX_SWITCH) {
				/* kernel/userspace switch */
				in_kernel = flags & IS_KERNEL;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(flags & IS_KERNEL);
			}
			if (flags & USER_CTX_SWITCH
			    && op_cpu_buffer_get_data(&entry, &val)) {
				/* userspace context switch */
				new = (struct task_struct *)val;
				oldmm = mm;
				release_mm(oldmm);
				mm = get_task_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
			if (op_cpu_buffer_get_size(&entry))
				add_data(&entry, mm);
			continue;
		}

		if (state < sb_bt_start)
			/* ignore sample */
			continue;

		if (add_sample(mm, sample, in_kernel))
			continue;

		/* ignore backtraces if failed to add a sample */
		if (state == sb_bt_start) {
			state = sb_bt_ignore;
			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
		}
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}
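
/* A rough sketch of the stream sync_buffer() emits into the event
 * buffer for one userspace task with a single sample (illustrative,
 * not captured output; values in <> are placeholders):
 *
 *   ESCAPE_CODE, CPU_SWITCH_CODE, <cpu>
 *   ESCAPE_CODE, CTX_SWITCH_CODE, <pid>, <exec cookie>
 *   ESCAPE_CODE, CTX_TGID_CODE, <tgid>
 *   ESCAPE_CODE, COOKIE_SWITCH_CODE, <file cookie>
 *   <offset>, <event>
 */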

/* This function can be used to add a buffer worth of data directly to
 * the kernel buffer. The buffer is assumed to be a circular buffer.
 * Entries are taken starting at index start and ending at index stop,
 * wrapping at max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
		       unsigned int stop, unsigned int max)
{
	int i;

	i = start;

	mutex_lock(&buffer_mutex);
	while (i != stop) {
		add_event_entry(buf[i++]);

		if (i >= max)
			i = 0;
	}

	mutex_unlock(&buffer_mutex);
}