// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
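
/*
 * Usage sketch (module and parameter names as defined in this file; the
 * parameter values below are illustrative only):
 *
 *	modprobe ring_buffer_benchmark producer_fifo=1 write_iteration=100
 *
 * Results are reported via trace_printk(), so they typically show up in
 * the tracing 'trace' file (e.g. /sys/kernel/tracing/trace) while the
 * module runs.
 */
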
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <asm/local.h>

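/*
 * Local mirror of a ring buffer page header, used by the page-read path
 * below to walk raw events by hand: a 64-bit timestamp, a commit
 * counter, and the rest of a 4K page as event data (4080 bytes, which
 * assumes a configuration where local_t is 8 bytes).
 */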
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10ULL
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static DECLARE_COMPLETION(read_start);
static DECLARE_COMPLETION(read_done);

static struct trace_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static unsigned int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;

static int producer_fifo;
static int consumer_fifo;

module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "use fifo for producer: 0 - disabled, 1 - low prio, 2 - fifo");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "use fifo for consumer: 0 - disabled, 1 - low prio, 2 - fifo");

static int read_events;

static int test_error;

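/*
 * Latch the first failure: set test_error so that break_test() (below)
 * makes all threads bail out, and emit a single WARN_ON backtrace rather
 * than flooding the log on every subsequent failed check.
 */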
#define TEST_ERROR()				\
	do {					\
		if (!test_error) {		\
			test_error = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

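/* A test pass should stop on the first error or on module unload. */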
static bool break_test(void)
{
	return test_error || kthread_should_stop();
}

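/*
 * Consume a single event from @cpu's buffer. Every event's payload is
 * the id of the CPU that wrote it (see the producer below), so any
 * other value means the buffer handed us corrupted data.
 */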
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		TEST_ERROR();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

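/*
 * Read a whole page from @cpu's buffer and walk its events by hand.
 * A sketch of the event format as the walker below assumes it: events
 * are 4-byte aligned, and the header's type_len field either encodes
 * the payload length directly (default case: type_len 32-bit words) or
 * selects a special record:
 *
 *	RINGBUF_TYPE_PADDING:     padding/discarded event, length in array[0]
 *	RINGBUF_TYPE_TIME_EXTEND: an 8-byte timestamp extension
 *	0:                        oversized data event, length in array[0]
 *
 * A non-positive increment means the walk lost alignment, which fails
 * the test.
 */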
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(bpage))
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		/* The commit counter may have missed-events flags set; mask them off */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !test_error; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				TEST_ERROR();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					TEST_ERROR();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				if (!event->array[0]) {
					TEST_ERROR();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (test_error)
				break;

			if (inc <= 0) {
				TEST_ERROR();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, cpu, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

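/*
 * Drain events from every online CPU, alternating between the event
 * and page read paths on successive runs, until the producer sets
 * reader_finish and wakes us for the final completion handshake.
 */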
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	/*
	 * Continue running until the producer specifically asks to stop
	 * and is ready for the completion.
	 */
	while (!READ_ONCE(reader_finish)) {
		int found = 1;

		while (found && !test_error) {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (test_error)
					break;

				if (stat == EVENT_FOUND)
					found = 1;
			}
		}

		/*
		 * Wait till the producer wakes us up when there is more data
		 * available or when the producer wants us to finish reading.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
	}
	__set_current_state(TASK_RUNNING);
	reader_finish = 0;
	complete(&read_done);
}

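/*
 * Hammer the buffer with as many writes as possible for RUN_TIME
 * seconds, periodically waking the consumer, then stop the reader and
 * report throughput (entries, overruns, hits/misses, ns per entry)
 * via trace_printk().
 */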
static void ring_buffer_producer(void)
{
	ktime_t start_time, end_time, timeout;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	start_time = ktime_get();
	timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		end_time = ktime_get();

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPTION
		/*
		 * On a non-preempt kernel, the 10 second run would
		 * stall everything while it runs. Instead, we call
		 * cond_resched() and also add any time that was lost
		 * to a reschedule.
		 *
		 * Do the cond_resched() at the same frequency we would
		 * wake up the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif
	} while (ktime_before(end_time, timeout) && !break_test());
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = ktime_us_delta(end_time, start_time);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (test_error)
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo)
			trace_printk("Running Consumer at SCHED_FIFO %s\n",
				     consumer_fifo == 1 ? "low" : "high");
		else
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
	}
	if (producer_fifo)
		trace_printk("Running Producer at SCHED_FIFO %s\n",
			     producer_fifo == 1 ? "low" : "high");
	else
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);

	/* Let the user know that the test is running at low priority */
	if (!producer_fifo && !consumer_fifo &&
	    producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

	trace_printk("Time: %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read: (reader disabled)\n");
	else
		trace_printk("Read: %ld (by %s)\n", read,
			     read_events ? "events" : "pages");
	trace_printk("Entries: %lld\n", entries);
	trace_printk("Total: %lld\n", entries + overruns + read);
	trace_printk("Missed: %ld\n", missed);
	trace_printk("Hit: %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}

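/* Once a test thread is done, park it here until kthread_stop() is called. */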
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

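/*
 * Consumer kthread: signal via read_start that a run can begin, drain
 * the buffer, then sleep until the producer wakes us for the next run.
 */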
static int ring_buffer_consumer_thread(void *arg)
{
	while (!break_test()) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

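/*
 * Producer kthread: reset the buffer, synchronize with the consumer
 * (if one is running), do a RUN_TIME benchmark pass, then sleep for
 * SLEEP_TIME seconds before repeating.
 */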
static int ring_buffer_producer_thread(void *arg)
{
	while (!break_test()) {
		ring_buffer_reset(buffer);

		if (consumer) {
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();
		if (break_test())
			goto out_kill;

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			goto out_kill;
		schedule_timeout(HZ * SLEEP_TIME);
	}

out_kill:
	__set_current_state(TASK_RUNNING);
	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

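/*
 * Note the asymmetry below: the consumer is created with
 * kthread_create() and left sleeping (the producer wakes it when a run
 * starts), while the producer starts running immediately via
 * kthread_run().
 */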
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 2)
			sched_set_fifo(consumer);
		else if (consumer_fifo == 1)
			sched_set_fifo_low(consumer);
		else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 2)
		sched_set_fifo(producer);
	else if (producer_fifo == 1)
		sched_set_fifo_low(producer);
	else
		set_user_nice(producer, producer_nice);

	return 0;

out_kill:
	if (consumer)
		kthread_stop(consumer);

out_fail:
	ring_buffer_free(buffer);
	return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");