// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#define DEBUG 1

#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>
#include <linux/pgtable.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"

struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	resource_size_t phys;
	unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);	/* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */
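
/*
 * Lock ordering: mmiotrace_mutex is always taken outside trace_lock;
 * see enable_mmiotrace() and disable_mmiotrace().
 */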

/* module parameters */
static unsigned long	filter_offset;
static bool		nommiotrace;
static bool		trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");

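/* Callers that need a stable answer must hold trace_lock (see above). */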
static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}

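/* Dump the page table entry mapping @address, as a debugging aid. */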
static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("Error in %s: no pte for page 0x%08lx\n",
		       __func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pr_emerg("2MB/4MB pages are not currently supported: 0x%08lx\n",
			 address);
		BUG();
	}
	pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
		address,
		(unsigned long long)pte_val(*pte),
		(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * The pre/post handler pairs have somehow been called in an unmatched
 * order (nested or unpaired). Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);

	pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
		 addr, my_reason->addr);
	print_pte(addr);
	pr_emerg("faulting IP is at %pS\n", (void *)regs->ip);
	pr_emerg("last faulting IP was at %pS\n", (void *)my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		 regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx   rcx: %016lx   rdx: %016lx\n",
		 regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}

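/*
 * kmmio pre-handler, called before the faulting access is re-executed.
 * Decodes the instruction at the fault and fills in the per-cpu
 * mmiotrace_rw record. For register reads the value is not known yet;
 * post() fills it in after the access has completed.
 */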
static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->private;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
							*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

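/*
 * kmmio post-handler, called once the faulting access has completed.
 * For reads, the destination register now holds the MMIO value, so it
 * can be recorded before the event is passed to mmio_trace_rw().
 */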
static void post(struct kmmio_probe *p, unsigned long condition,
				struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_traces count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg("unexpected post handler\n");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

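/*
 * Record a new ioremap: emit an MMIO_PROBE mapping event, remember the
 * mapping on trace_list and arm a kmmio probe over the mapped range.
 */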
static void ioremap_trace_core(resource_size_t offset, unsigned long size,
							void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	/* These are page-unaligned. */
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err("kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.private = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled()) {
		kfree(trace);
		goto not_enabled;
	}

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}

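/*
 * Hook called on ioremap. If filter_offset is set, only a mapping that
 * starts exactly at that physical address is traced.
 */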
void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
						void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
		 (unsigned long long)offset, size, addr);
	if (filter_offset && offset != filter_offset)
		return;
	ioremap_trace_core(offset, size, addr);
}

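/*
 * Undo ioremap_trace_core(): unregister the probe, drop the entry from
 * trace_list and emit an MMIO_UNPROBE event. A map_id of -1 marks an
 * unmap for which no traced mapping was found.
 */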
static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug("Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}

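/* Hook called on iounmap(); may sleep in synchronize_rcu(). */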
void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}

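/*
 * Inject a printf-style marker into the trace stream, e.g. (hypothetical
 * driver annotation):
 *	mmiotrace_printk("about to write reg 0x%x\n", reg);
 * Returns the number of bytes written, or 0 when tracing is disabled.
 */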
int mmiotrace_printk(const char *fmt, ...)
{
	int ret = 0;
	va_list args;
	unsigned long flags;

	va_start(args, fmt);

	spin_lock_irqsave(&trace_lock, flags);
	if (is_enabled())
		ret = mmio_trace_printk(fmt, args);
	spin_unlock_irqrestore(&trace_lock, flags);

	va_end(args);
	return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);

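/*
 * Free all remaining traced mappings at shutdown: unregister every
 * probe first, wait for RCU, then release the entries.
 */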
static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
			  trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;

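/*
 * Take all CPUs except the first one offline. kmmio traps accesses by
 * disarming the page and single-stepping the faulting instruction;
 * another CPU touching the page during that window would go untraced,
 * so tracing is only reliable with a single CPU online.
 */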
static void enter_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) &&
	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
		pr_notice("Failed to allocate mask\n");
		goto out;
	}

	cpus_read_lock();
	cpumask_copy(downed_cpus, cpu_online_mask);
	cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
	if (num_online_cpus() > 1)
		pr_notice("Disabling non-boot CPUs...\n");
	cpus_read_unlock();

	for_each_cpu(cpu, downed_cpus) {
		err = remove_cpu(cpu);
		if (!err)
			pr_info("CPU%d is down.\n", cpu);
		else
			pr_err("Error taking CPU%d down: %d\n", cpu, err);
	}
out:
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs still online, may miss events.\n");
}

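/* Bring the CPUs taken down by enter_uniprocessor() back online. */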
static void leave_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
		return;
	pr_notice("Re-enabling CPUs...\n");
	for_each_cpu(cpu, downed_cpus) {
		err = add_cpu(cpu);
		if (!err)
			pr_info("enabled CPU%d.\n", cpu);
		else
			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
	}
}

#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs are online, may miss events. Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif

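/*
 * Start tracing: initialize kmmio, go uniprocessor, then set
 * mmiotrace_enabled under trace_lock. Serialized against
 * disable_mmiotrace() by mmiotrace_mutex.
 */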
void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	if (nommiotrace)
		pr_info("MMIO tracing disabled.\n");
	kmmio_init();
	enter_uniprocessor();
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info("enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}

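/*
 * Stop tracing: clear mmiotrace_enabled under trace_lock so no new
 * events are emitted, then purge leftover probes and bring CPUs back.
 */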
void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	leave_uniprocessor();
	kmmio_cleanup();
	pr_info("disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}