// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) 2009 IBM Corporation
 * Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/user.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>

/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
EXPORT_PER_CPU_SYMBOL(cpu_dr7);

/* Per cpu debug address registers values */
static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]);

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);


static inline unsigned long
__encode_dr7(int drnum, unsigned int len, unsigned int type)
{
	unsigned long bp_info;

	bp_info = (len | type) & 0xf;
	bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
	bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE));

	return bp_info;
}

/*
 * Encode the length, type, Exact, and Enable bits for a particular breakpoint
 * as stored in debug register 7.
 */
unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
{
	return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN;
}

/*
 * Decode the length and type bits for a particular breakpoint as
 * stored in debug register 7. Return the "enabled" status.
 */
int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type)
{
	int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);

	*len = (bp_info & 0xc) | 0x40;
	*type = (bp_info & 0x3) | 0x80;

	return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
}
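
/*
 * Illustrative sketch (a hypothetical helper, not part of this file in
 * the kernel): how encode_dr7()/decode_dr7() round-trip. A 4-byte write
 * watchpoint placed in slot 3 sets RW3=01, LEN3=11, G3 and GE in DR7;
 * decoding the same word recovers X86_BREAKPOINT_LEN_4 and
 * X86_BREAKPOINT_WRITE along with the enable bits.
 */
static void __maybe_unused dr7_roundtrip_example(void)
{
	unsigned int len, type;
	unsigned long dr7;

	dr7 = encode_dr7(3, X86_BREAKPOINT_LEN_4, X86_BREAKPOINT_WRITE);

	/* Returns the enable bits for slot 3 (non-zero here: G3 is set). */
	if (decode_dr7(dr7, 3, &len, &type))
		pr_info("slot 3: len=%#x type=%#x\n", len, type);
}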

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint. Eventually we enable it in the debug control register.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long *dr7;
	int i;

	lockdep_assert_irqs_disabled();

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return -EBUSY;

	set_debugreg(info->address, i);
	__this_cpu_write(cpu_debugreg[i], info->address);

	dr7 = this_cpu_ptr(&cpu_dr7);
	*dr7 |= encode_dr7(i, info->len, info->type);

	/*
	 * Ensure we first write cpu_dr7 before we set the DR7 register.
	 * This ensures an NMI never sees cpu_dr7 zero while DR7 is not.
	 */
	barrier();

	set_debugreg(*dr7, 7);
	if (info->mask)
		set_dr_addr_mask(info->mask, i);

	return 0;
}
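
/*
 * Worked example (constants from asm/debugreg.h): installing a one-byte
 * write breakpoint into slot 0 yields
 *
 *   encode_dr7(0, X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE)
 *     = ((0x40 | 0x81) & 0xf) << 16	RW0=01, LEN0=00
 *     | DR_GLOBAL_ENABLE << 0		G0
 *     | DR_GLOBAL_SLOWDOWN		GE
 *     = 0x10202
 *
 * so after arch_install_hw_breakpoint() this CPU's cpu_dr7 and DR7 both
 * read 0x10202, assuming no other slot was already in use.
 */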

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search for the debug address register it uses and then we
 * disable it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long dr7;
	int i;

	lockdep_assert_irqs_disabled();

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return;

	dr7 = this_cpu_read(cpu_dr7);
	dr7 &= ~__encode_dr7(i, info->len, info->type);

	set_debugreg(dr7, 7);
	if (info->mask)
		set_dr_addr_mask(0, i);

	/*
	 * Ensure the write to cpu_dr7 is after we've set the DR7 register.
	 * This ensures an NMI never sees cpu_dr7 zero while DR7 is not.
	 */
	barrier();

	this_cpu_write(cpu_dr7, dr7);
}

static int arch_bp_generic_len(int x86_len)
{
	switch (x86_len) {
	case X86_BREAKPOINT_LEN_1:
		return HW_BREAKPOINT_LEN_1;
	case X86_BREAKPOINT_LEN_2:
		return HW_BREAKPOINT_LEN_2;
	case X86_BREAKPOINT_LEN_4:
		return HW_BREAKPOINT_LEN_4;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		return HW_BREAKPOINT_LEN_8;
#endif
	default:
		return -EINVAL;
	}
}

int arch_bp_generic_fields(int x86_len, int x86_type,
			   int *gen_len, int *gen_type)
{
	int len;

	/* Type */
	switch (x86_type) {
	case X86_BREAKPOINT_EXECUTE:
		if (x86_len != X86_BREAKPOINT_LEN_X)
			return -EINVAL;

		*gen_type = HW_BREAKPOINT_X;
		*gen_len = sizeof(long);
		return 0;
	case X86_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case X86_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	len = arch_bp_generic_len(x86_len);
	if (len < 0)
		return -EINVAL;
	*gen_len = len;

	return 0;
}
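
/*
 * Illustrative sketch (a hypothetical helper, not in the kernel source):
 * translating the x86 encoding of a 2-byte read/write watchpoint into
 * the generic perf encoding used by the core hw_breakpoint layer.
 */
static void __maybe_unused bp_generic_fields_example(void)
{
	int gen_len, gen_type;

	if (!arch_bp_generic_fields(X86_BREAKPOINT_LEN_2, X86_BREAKPOINT_RW,
				    &gen_len, &gen_type)) {
		/*
		 * Here gen_len == HW_BREAKPOINT_LEN_2 and
		 * gen_type == (HW_BREAKPOINT_R | HW_BREAKPOINT_W).
		 */
	}
}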

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned long va;
	int len;

	va = hw->address;
	len = arch_bp_generic_len(hw->len);
	WARN_ON_ONCE(len < 0);

	/*
	 * We don't need to worry about va + len - 1 overflowing:
	 * we already require that va is aligned to a multiple of len.
	 */
	return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
}

/*
 * Checks whether the range [addr, end] overlaps the area [base, base + size).
 */
static inline bool within_area(unsigned long addr, unsigned long end,
			       unsigned long base, unsigned long size)
{
	return end >= base && addr < (base + size);
}
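
/*
 * Illustrative sketch (a hypothetical helper): within_area() takes an
 * inclusive byte range [addr, end] and a half-open area
 * [base, base + size), so the last byte of the area overlaps while the
 * first byte past it does not.
 */
static void __maybe_unused within_area_example(void)
{
	/* One-byte access at the area's last byte: overlaps. */
	WARN_ON(!within_area(0x1fff, 0x1fff, 0x1000, 0x1000));

	/* One-byte access just past the area: no overlap. */
	WARN_ON(within_area(0x2000, 0x2000, 0x1000, 0x1000));
}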

/*
 * Checks whether the range from addr to end, inclusive, overlaps the fixed
 * mapped CPU entry area range or other ranges used for CPU entry.
 */
static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
{
	int cpu;

	/* CPU entry area is always used for CPU entry */
	if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
			CPU_ENTRY_AREA_TOTAL_SIZE))
		return true;

	/*
	 * When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU
	 * GSBASE value via __per_cpu_offset or pcpu_unit_offsets.
	 */
#ifdef CONFIG_SMP
	if (within_area(addr, end, (unsigned long)__per_cpu_offset,
			sizeof(unsigned long) * nr_cpu_ids))
		return true;
#else
	if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets,
			sizeof(pcpu_unit_offsets)))
		return true;
#endif

	for_each_possible_cpu(cpu) {
		/* The original rw GDT is being used after load_direct_gdt() */
		if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
				GDT_SIZE))
			return true;

		/*
		 * cpu_tss_rw is not directly referenced by hardware, but
		 * it is also used in CPU entry code.
		 */
		if (within_area(addr, end,
				(unsigned long)&per_cpu(cpu_tss_rw, cpu),
				sizeof(struct tss_struct)))
			return true;

		/*
		 * cpu_tlbstate.user_pcid_flush_mask is used for CPU entry.
		 * A data breakpoint on it would cause an unwanted #DB.
		 * Protect the full cpu_tlbstate structure to be sure.
		 */
		if (within_area(addr, end,
				(unsigned long)&per_cpu(cpu_tlbstate, cpu),
				sizeof(struct tlb_state)))
			return true;

		/*
		 * When in a guest (X86_FEATURE_HYPERVISOR), local_db_save()
		 * reads the per-CPU cpu_dr7 before clearing the DR7 register.
		 */
		if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
				sizeof(cpu_dr7)))
			return true;
	}

	return false;
}

static int arch_build_bp_info(struct perf_event *bp,
			      const struct perf_event_attr *attr,
			      struct arch_hw_breakpoint *hw)
{
	unsigned long bp_end;

	bp_end = attr->bp_addr + attr->bp_len - 1;
	if (bp_end < attr->bp_addr)
		return -EINVAL;

	/*
	 * Prevent any breakpoint of any type that overlaps the CPU
	 * entry area and data. This protects the IST stacks and also
	 * reduces the chance that we ever find out what happens if
	 * there's a data breakpoint on the GDT, IDT, or TSS.
	 */
	if (within_cpu_entry(attr->bp_addr, bp_end))
		return -EINVAL;

	hw->address = attr->bp_addr;
	hw->mask = 0;

	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_W:
		hw->type = X86_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		hw->type = X86_BREAKPOINT_RW;
		break;
	case HW_BREAKPOINT_X:
		/*
		 * We don't allow kernel breakpoints in places that are not
		 * acceptable for kprobes. On non-kprobes kernels, we don't
		 * allow kernel breakpoints at all.
		 */
		if (attr->bp_addr >= TASK_SIZE_MAX) {
			if (within_kprobe_blacklist(attr->bp_addr))
				return -EINVAL;
		}

		hw->type = X86_BREAKPOINT_EXECUTE;
		/*
		 * x86 instruction breakpoints need to have a specific
		 * undefined len. But we still need to check that userspace
		 * is not trying to set up an unsupported length, to get a
		 * range breakpoint for example.
		 */
		if (attr->bp_len == sizeof(long)) {
			hw->len = X86_BREAKPOINT_LEN_X;
			return 0;
		}
		fallthrough;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (attr->bp_len) {
	case HW_BREAKPOINT_LEN_1:
		hw->len = X86_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		hw->len = X86_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		hw->len = X86_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case HW_BREAKPOINT_LEN_8:
		hw->len = X86_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		/* AMD range breakpoint */
		if (!is_power_of_2(attr->bp_len))
			return -EINVAL;
		if (attr->bp_addr & (attr->bp_len - 1))
			return -EINVAL;

		if (!boot_cpu_has(X86_FEATURE_BPEXT))
			return -EOPNOTSUPP;

		/*
		 * It's impossible to use a range breakpoint to fake out
		 * user vs kernel detection because bp_len - 1 can't
		 * have the high bit set. If we ever allow range instruction
		 * breakpoints, then we'll have to check for kprobe-blacklisted
		 * addresses anywhere in the range.
		 */
		hw->mask = attr->bp_len - 1;
		hw->len = X86_BREAKPOINT_LEN_1;
	}

	return 0;
}
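
/*
 * Worked example for the AMD range-breakpoint path above: a 16-byte
 * watchpoint at a 16-byte-aligned address ends up with hw->mask = 0xf
 * and hw->len = X86_BREAKPOINT_LEN_1; arch_install_hw_breakpoint()
 * then programs the mask via set_dr_addr_mask(), so the low four
 * address bits are ignored by the hardware comparison.
 */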

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp, attr, hw);
	if (ret)
		return ret;

	switch (hw->len) {
	case X86_BREAKPOINT_LEN_1:
		align = 0;
		if (hw->mask)
			align = hw->mask;
		break;
	case X86_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case X86_BREAKPOINT_LEN_4:
		align = 3;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		align = 7;
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (hw->address & align)
		return -EINVAL;

	return 0;
}
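
/*
 * Usage sketch, modeled on samples/hw_breakpoint/data_breakpoint.c;
 * the watched address parameter and both helpers below are hypothetical.
 * This is the path that reaches hw_breakpoint_arch_parse() above: the
 * core layer validates the attr, then installs a slot on every CPU via
 * arch_install_hw_breakpoint().
 */
static void __maybe_unused sample_hbp_handler(struct perf_event *bp,
					      struct perf_sample_data *data,
					      struct pt_regs *regs)
{
	pr_info("watched address was accessed\n");
}

static int __maybe_unused watch_example(unsigned long watched_addr)
{
	struct perf_event_attr attr;
	struct perf_event * __percpu *hbp;

	hw_breakpoint_init(&attr);
	attr.bp_addr = watched_addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;

	hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler, NULL);
	if (IS_ERR((void __force *)hbp))
		return PTR_ERR((void __force *)hbp);

	return 0;
}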

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < HBP_NUM; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}

	t->virtual_dr6 = 0;
	t->ptrace_dr7 = 0;
}

void hw_breakpoint_restore(void)
{
	set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
	set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
	set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
	set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
	set_debugreg(DR6_RESERVED, 6);
	set_debugreg(__this_cpu_read(cpu_dr7), 7);
}
EXPORT_SYMBOL_GPL(hw_breakpoint_restore);

/*
 * Handle debug exception notifications.
 *
 * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.
 *
 * NOTIFY_DONE is returned if one of the following conditions is true:
 * i) When the causative address is from user-space and the exception
 *    is a valid one, i.e. not triggered as a result of lazy debug register
 *    switching
 * ii) When there are more bits than trap<n> set in the DR6 register (such
 *     as BD, BS or BT) indicating that more than one debug condition is
 *     met and requires some more action in do_debug().
 *
 * NOTIFY_STOP is returned for all other cases.
 */
static int hw_breakpoint_handler(struct die_args *args)
{
	int i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned long *dr6_p;
	unsigned long dr6;
	bool bpx;

	/* The DR6 value is pointed to by args->err */
	dr6_p = (unsigned long *)ERR_PTR(args->err);
	dr6 = *dr6_p;

	/* Do an early return if no trap bits are set in DR6 */
	if ((dr6 & DR_TRAP_BITS) == 0)
		return NOTIFY_DONE;

	/* Handle all the breakpoints that were triggered */
	for (i = 0; i < HBP_NUM; ++i) {
		if (likely(!(dr6 & (DR_TRAP0 << i))))
			continue;

		bp = this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE;

		/*
		 * TF and data breakpoints are traps and can be merged;
		 * instruction breakpoints, however, are faults and will be
		 * raised separately.
		 *
		 * DR6 can nevertheless indicate both TF and an instruction
		 * breakpoint. In that case take TF, as it has precedence,
		 * and delay the instruction breakpoint until the next
		 * exception.
		 */
		if (bpx && (dr6 & DR_STEP))
			continue;

		/*
		 * Reset the 'i'th TRAP bit in dr6 to denote completion of
		 * exception handling
		 */
		(*dr6_p) &= ~(DR_TRAP0 << i);

		perf_bp_event(bp, args->regs);

		/*
		 * Set up the resume flag to avoid breakpoint recursion when
		 * returning to the origin.
		 */
		if (bpx)
			args->regs->flags |= X86_EFLAGS_RF;
	}

	/*
	 * Further processing in do_debug() is needed for a) user-space
	 * breakpoints (to generate signals) and b) when the system has
	 * taken an exception due to multiple causes.
	 */
	if ((current->thread.virtual_dr6 & DR_TRAP_BITS) ||
	    (dr6 & (~DR_TRAP_BITS)))
		rc = NOTIFY_DONE;

	return rc;
}

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	if (val != DIE_DEBUG)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}