/**
 * @file cpu_buffer.h
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 */

#ifndef OPROFILE_CPU_BUFFER_H
#define OPROFILE_CPU_BUFFER_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/ring_buffer.h>

struct task_struct;

int alloc_cpu_buffers(void);
void free_cpu_buffers(void);

void start_cpu_work(void);
void end_cpu_work(void);
void flush_cpu_work(void);

/*
 * The CPU buffer is composed of entries of this type; the same entries
 * are also used for context-switch notes.
 */
struct op_sample {
	unsigned long eip;
	unsigned long event;
	unsigned long data[];
};

struct op_entry;

struct oprofile_cpu_buffer {
	unsigned long buffer_size;
	struct task_struct *last_task;
	int last_is_kernel;
	int tracing;
	unsigned long sample_received;
	unsigned long sample_lost_overflow;
	unsigned long backtrace_aborted;
	unsigned long sample_invalid_eip;
	int cpu;
	struct delayed_work work;
};

DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

/*
 * Resets the cpu buffer to a sane state.
 *
 * last_is_kernel and last_task are reset to invalid values; the next
 * sample collected will repopulate them and thereby re-initialize the
 * buffer state.
 */
static inline void op_cpu_buffer_reset(int cpu)
{
	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);

	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
}

/*
 * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
 * called only if op_cpu_buffer_write_reserve() did not return NULL
 * (equivalently, only if entry->event != NULL); otherwise entry->size
 * and entry->event would be used uninitialized.
 */

struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
int op_cpu_buffer_write_commit(struct op_entry *entry);
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
unsigned long op_cpu_buffer_entries(int cpu);
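
/*
 * Usage sketch (illustrative, not part of the API above): writing one
 * sample plus two words of extra data, following the rule that
 * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() run only
 * after a successful reserve; if the reserve fails the sample is simply
 * dropped.  It is assumed here that the size argument of the reserve
 * counts extra data words (matching how op_cpu_buffer_add_data()
 * consumes entry->size); pc, event, extra0 and extra1 are made-up
 * values for the example.
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	sample = op_cpu_buffer_write_reserve(&entry, 2);
 *	if (!sample)
 *		return;
 *	sample->eip = pc;
 *	sample->event = event;
 *	op_cpu_buffer_add_data(&entry, extra0);
 *	op_cpu_buffer_add_data(&entry, extra1);
 *	op_cpu_buffer_write_commit(&entry);
 */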

/* returns the remaining free space (in data words) in the entry */
static inline
int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->size)
		return 0;
	*entry->data = val;
	entry->size--;
	entry->data++;
	return entry->size;
}

/* returns the size of data in the entry */
static inline
int op_cpu_buffer_get_size(struct op_entry *entry)
{
	return entry->size;
}

/* returns 0 if the entry is empty, otherwise the number of data words
 * still available, including the value just returned in *val */
static inline
int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
{
	int size = entry->size;
	if (!size)
		return 0;
	*val = *entry->data;
	entry->size--;
	entry->data++;
	return size;
}
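
/*
 * Usage sketch (illustrative): draining one entry on the read side.
 * op_cpu_buffer_read_entry() returning NULL is taken to mean the
 * per-cpu buffer is empty; process() and process_data() stand in for
 * whatever the consumer does with the values.
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *	unsigned long val;
 *
 *	sample = op_cpu_buffer_read_entry(&entry, cpu);
 *	if (!sample)
 *		return;
 *	process(sample->eip, sample->event);
 *	while (op_cpu_buffer_get_data(&entry, &val))
 *		process_data(val);
 */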

/* extra data flags */
#define KERNEL_CTX_SWITCH	(1UL << 0)
#define IS_KERNEL		(1UL << 1)
#define TRACE_BEGIN		(1UL << 2)
#define USER_CTX_SWITCH		(1UL << 3)
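
/*
 * Usage sketch (illustrative): testing the flag bits above.  It is
 * assumed here that a flags word reaches the consumer as one of the
 * extra data words, fetched with op_cpu_buffer_get_data(&entry, &flags),
 * and that KERNEL_CTX_SWITCH marks a kernel/user mode change (with
 * IS_KERNEL giving the new mode), USER_CTX_SWITCH a task switch, and
 * TRACE_BEGIN the start of a backtrace; these readings follow the flag
 * names rather than anything stated in this header.
 *
 *	if (flags & KERNEL_CTX_SWITCH)
 *		in_kernel = !!(flags & IS_KERNEL);
 *	if (flags & TRACE_BEGIN)
 *		backtrace_follows = 1;
 */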

#endif /* OPROFILE_CPU_BUFFER_H */