^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * @file event_buffer.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * @remark Copyright 2002 OProfile authors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * @remark Read the file COPYING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * @author John Levon <levon@movementarian.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This is the global event buffer that the user-space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * daemon reads from. The event buffer is an untyped array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * of unsigned longs. Entries are prefixed by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * escape value ESCAPE_CODE followed by an identifying code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/oprofile.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/capability.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/dcookies.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "oprof.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "event_buffer.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "oprofile_stats.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
/* Serializes all access to the event buffer and its bookkeeping below. */
DEFINE_MUTEX(buffer_mutex);

/* Bit 0: set while the event device is open (single-opener guard). */
static unsigned long buffer_opened;
/* Reader sleeps here until buffer_ready is set. */
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
/* The buffer proper: an untyped array of unsigned longs (see file header). */
static unsigned long *event_buffer;
/* Capacity in unsigned longs, snapshotted from oprofilefs at alloc time. */
static unsigned long buffer_size;
/* Headroom before the end at which the reader is woken. */
static unsigned long buffer_watershed;
/* Next free slot in event_buffer; protected by buffer_mutex. */
static size_t buffer_pos;
/* atomic_t because wait_event checks it outside of buffer_mutex */
static atomic_t buffer_ready = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * Add an entry to the event buffer. When we get near to the end we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * wake up the process sleeping on the read() of the file. To protect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * the event_buffer this function may only be called when buffer_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) void add_event_entry(unsigned long value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * This shouldn't happen since all workqueues or handlers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * canceled or flushed before the event buffer is freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) if (!event_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) if (buffer_pos == buffer_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) atomic_inc(&oprofile_stats.event_lost_overflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) event_buffer[buffer_pos] = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) if (++buffer_pos == buffer_size - buffer_watershed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) atomic_set(&buffer_ready, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) wake_up(&buffer_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) /* Wake up the waiting process if any. This happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * on "echo 0 >/dev/oprofile/enable" so the daemon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * processes the data remaining in the event buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) */
/* Wake up the waiting process if any. This happens
 * on "echo 0 >/dev/oprofile/enable" so the daemon
 * processes the data remaining in the event buffer.
 */
void wake_up_buffer_waiter(void)
{
	/* buffer_mutex orders this against readers resetting buffer_ready. */
	mutex_lock(&buffer_mutex);
	atomic_set(&buffer_ready, 1);
	wake_up(&buffer_wait);
	mutex_unlock(&buffer_mutex);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) int alloc_event_buffer(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) raw_spin_lock_irqsave(&oprofilefs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) buffer_size = oprofile_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) buffer_watershed = oprofile_buffer_watershed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) if (buffer_watershed >= buffer_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) buffer_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) event_buffer = vmalloc(array_size(buffer_size, sizeof(unsigned long)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) if (!event_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) void free_event_buffer(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) mutex_lock(&buffer_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) vfree(event_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) buffer_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) event_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) mutex_unlock(&buffer_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) static int event_buffer_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) int err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) if (!perfmon_capable())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) if (test_and_set_bit_lock(0, &buffer_opened))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /* Register as a user of dcookies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * to ensure they persist for the lifetime of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * the open event file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) file->private_data = dcookie_register();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) if (!file->private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) if ((err = oprofile_setup()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /* NB: the actual start happens from userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * echo 1 >/dev/oprofile/enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return nonseekable_open(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) dcookie_unregister(file->private_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) __clear_bit_unlock(0, &buffer_opened);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
/*
 * Release the event device: stop and shut down profiling, drop the
 * dcookie registration taken at open, and reset the buffer bookkeeping
 * so the next open starts with an empty buffer.
 *
 * Ordering matters: oprofile_stop()/oprofile_shutdown() must quiesce
 * all producers before buffer_pos/buffer_ready are reset, and the
 * buffer_opened bit is cleared last so a new opener cannot race in.
 */
static int event_buffer_release(struct inode *inode, struct file *file)
{
	oprofile_stop();
	oprofile_shutdown();
	dcookie_unregister(file->private_data);
	buffer_pos = 0;
	atomic_set(&buffer_ready, 0);
	__clear_bit_unlock(0, &buffer_opened);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
/*
 * Read the whole event buffer in one shot.  The daemon must ask for
 * exactly buffer_size words at offset 0; anything else is -EINVAL.
 * Sleeps interruptibly until buffer_ready is set, then copies
 * buffer_pos words to userspace and resets the buffer.  Returns the
 * number of bytes copied, -EINTR on signal or if the buffer was freed
 * while we slept, -EAGAIN if woken without data, -EFAULT on copy error.
 */
static ssize_t event_buffer_read(struct file *file, char __user *buf,
				 size_t count, loff_t *offset)
{
	int retval = -EINVAL;
	size_t const max = buffer_size * sizeof(unsigned long);

	/* handling partial reads is more trouble than it's worth */
	if (count != max || *offset)
		return -EINVAL;

	/* buffer_ready is atomic_t because it is checked without the mutex */
	wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));

	if (signal_pending(current))
		return -EINTR;

	/* can't currently happen */
	if (!atomic_read(&buffer_ready))
		return -EAGAIN;

	mutex_lock(&buffer_mutex);

	/* May happen if the buffer is freed during pending reads. */
	if (!event_buffer) {
		retval = -EINTR;
		goto out;
	}

	atomic_set(&buffer_ready, 0);

	retval = -EFAULT;

	/* only buffer_pos words are valid; copy just those */
	count = buffer_pos * sizeof(unsigned long);

	if (copy_to_user(buf, event_buffer, count))
		goto out;

	retval = count;
	buffer_pos = 0;

out:
	mutex_unlock(&buffer_mutex);
	return retval;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
/* File operations for the oprofile event device (no seeking allowed). */
const struct file_operations event_buffer_fops = {
	.open		= event_buffer_open,
	.release	= event_buffer_release,
	.read		= event_buffer_read,
	.llseek		= no_llseek,
};