Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include <asm/ptrace.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

static struct trace_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer)
		ring_buffer_free(op_ring_buffer);
	op_ring_buffer = NULL;
}

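/*
 * Descriptive note (added): approximate per-event header overhead of
 * the generic ring buffer, accounted for below when sizing the buffer
 * in alloc_cpu_buffers().
 */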
#define RB_EVENT_HDR_SIZE 4

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	work_enabled = 0;
}

void flush_cpu_work(void)
{
	int i;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/* these works are per-cpu, no need for flush_sync */
		flush_delayed_work(&b->work);
	}
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer, while
 * struct op_sample contains the data that is stored in the ring
 * buffer. The entry may be passed in uninitialized. The function
 * reserves a data array of the given size. Call
 * op_cpu_buffer_write_commit() after preparing the sample. Returns
 * NULL on error, otherwise a pointer to the reserved sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]));
	if (!entry->event)
		return NULL;
	entry->sample = ring_buffer_event_data(entry->event);
	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}
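
/*
 * Illustrative sketch (not part of the original file): the typical
 * reserve/commit pairing for a plain sample. The values my_pc and
 * my_event are hypothetical stand-ins supplied by the caller.
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	sample = op_cpu_buffer_write_reserve(&entry, 0);
 *	if (!sample)
 *		return -ENOMEM;
 *	sample->eip = my_pc;
 *	sample->event = my_event;
 *	return op_cpu_buffer_write_commit(&entry);
 *
 * This is exactly the pattern op_add_sample() below follows.
 */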

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}

struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
	if (!e)
		return NULL;

	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}
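
/*
 * Consumer-side sketch (illustrative, not part of the original file):
 * roughly how sync_buffer() in buffer_sync.c drains one CPU's buffer.
 * ring_buffer_consume() removes the event it returns, so each sample
 * is delivered at most once.
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *	unsigned long avail = op_cpu_buffer_entries(cpu);
 *	unsigned long i;
 *
 *	for (i = 0; i < avail; i++) {
 *		sample = op_cpu_buffer_read_entry(&entry, cpu);
 *		if (!sample)
 *			break;
 *		... process sample->eip and sample->event ...
 *	}
 */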

static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}

static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event,
	   struct task_struct *task)
{
	struct task_struct *tsk = task ? task : current;
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel,
			  struct task_struct *task)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * If log_sample() fails we can't backtrace, since we lost the
	 * source of this event.
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
				unsigned long event, int is_kernel,
				struct task_struct *task)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel;
	unsigned long pc;

	if (likely(regs)) {
		is_kernel = !user_mode(regs);
		pc = profile_pc(regs);
	} else {
		is_kernel = 0;    /* This value will not be used */
		pc = ESCAPE_CODE; /* as this causes an early return. */
	}

	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}

/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}

int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;
	if (op_cpu_buffer_get_size(entry) < 2)
		/*
		 * The function returns 0 to indicate a buffer that is
		 * too small, even if there is some space left.
		 */
		return 0;
	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;
	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}

int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}
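
/*
 * Illustrative sketch (not part of the original file): the write path
 * for an extended sample carrying extra data words, roughly as used by
 * architecture code (e.g. the AMD IBS driver). MY_SAMPLE_CODE, regs,
 * pc and val[] are hypothetical stand-ins.
 *
 *	struct op_entry entry;
 *
 *	oprofile_write_reserve(&entry, regs, pc, MY_SAMPLE_CODE, 2);
 *	oprofile_add_data(&entry, val[0]);
 *	oprofile_add_data(&entry, val[1]);
 *	oprofile_write_commit(&entry);
 *
 * If the reserve fails it sets entry->event to NULL, and the helpers
 * above then return early (0 or -EINVAL), so the caller need not
 * check every step.
 */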

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * A broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get one.
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
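	/*
	 * Descriptive note (added): the work normally runs on b->cpu;
	 * if it was migrated here because b->cpu went offline, stop
	 * rather than touch another CPU's buffer.
	 */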
	if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
		cancel_delayed_work(&b->work);
		return;
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}