// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/coresight.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps __ro_after_init;
static int core_num_wrps __ro_after_init;

/* Debug architecture version. */
static u8 debug_arch __ro_after_init;

/* Does debug architecture support OS Save and Restore? */
static bool has_ossr __ro_after_init;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len __ro_after_init;

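/*
 * The register accessors below are indexed by a single integer: the upper
 * bits select the register bank (the OP2 value, matching the ARM_BASE_*
 * constants used by callers) and the low four bits select the slot within
 * that bank, as encoded by the ((OP2 << 4) + M) case labels.
 */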
#define READ_WB_REG_CASE(OP2, M, VAL)			\
	case ((OP2 << 4) + M):				\
		ARM_DBG_READ(c0, c ## M, OP2, VAL);	\
		break

#define WRITE_WB_REG_CASE(OP2, M, VAL)			\
	case ((OP2 << 4) + M):				\
		ARM_DBG_WRITE(c0, c ## M, OP2, VAL);	\
		break

#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
	READ_WB_REG_CASE(OP2, 0, VAL);		\
	READ_WB_REG_CASE(OP2, 1, VAL);		\
	READ_WB_REG_CASE(OP2, 2, VAL);		\
	READ_WB_REG_CASE(OP2, 3, VAL);		\
	READ_WB_REG_CASE(OP2, 4, VAL);		\
	READ_WB_REG_CASE(OP2, 5, VAL);		\
	READ_WB_REG_CASE(OP2, 6, VAL);		\
	READ_WB_REG_CASE(OP2, 7, VAL);		\
	READ_WB_REG_CASE(OP2, 8, VAL);		\
	READ_WB_REG_CASE(OP2, 9, VAL);		\
	READ_WB_REG_CASE(OP2, 10, VAL);		\
	READ_WB_REG_CASE(OP2, 11, VAL);		\
	READ_WB_REG_CASE(OP2, 12, VAL);		\
	READ_WB_REG_CASE(OP2, 13, VAL);		\
	READ_WB_REG_CASE(OP2, 14, VAL);		\
	READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
	WRITE_WB_REG_CASE(OP2, 15, VAL)

static u32 read_wb_reg(int n)
{
	u32 val = 0;

	switch (n) {
	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warn("attempt to read from unknown breakpoint register %d\n",
			n);
	}

	return val;
}

static void write_wb_reg(int n, u32 val)
{
	switch (n) {
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warn("attempt to write to unknown breakpoint register %d\n",
			n);
	}
	isb();
}

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
		pr_warn_once("CPUID feature registers not supported. "
			     "Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
	}

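	/* DBGDIDR[19:16] holds the debug architecture version. */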
	ARM_DBG_READ(c0, c0, 0, didr);
	return (didr >> 16) & 0xf;
}

u8 arch_get_debug_arch(void)
{
	return debug_arch;
}

static int debug_arch_supported(void)
{
	u8 arch = get_debug_arch();

	/* We don't support the memory-mapped interface. */
	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
		arch >= ARM_DEBUG_ARCH_V7_1;
}

/* Can we determine the watchpoint access type from the fsr? */
static int debug_exception_updates_fsr(void)
{
	return get_debug_arch() >= ARM_DEBUG_ARCH_V8;
}

/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, c0, 0, didr);
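	/* DBGDIDR[31:28] encodes the number of WRPs implemented, minus one. */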
	return ((didr >> 28) & 0xf) + 1;
}

/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, c0, 0, didr);
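	/* DBGDIDR[27:24] encodes the number of BRPs implemented, minus one. */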
	return ((didr >> 24) & 0xf) + 1;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
	/*
	 * On debug architectures prior to 7.1, when a watchpoint fires, the
	 * only way to work out which watchpoint it was is by disassembling
	 * the faulting instruction and working out the address of the memory
	 * access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Providing we have more than 1 breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */
	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
		return 1;

	return get_num_wrp_resources();
}

/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();
	return core_has_mismatch_brps() ? brps - 1 : brps;
}

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
static int monitor_mode_enabled(void)
{
	u32 dscr;
	ARM_DBG_READ(c0, c1, 0, dscr);
	return !!(dscr & ARM_DSCR_MDBGEN);
}

static int enable_monitor_mode(void)
{
	u32 dscr;
	ARM_DBG_READ(c0, c1, 0, dscr);

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/* Write to the corresponding DSCR. */
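	/*
	 * v6 and v6.1 only provide the c0, c1, 0 encoding of the DSCR;
	 * v7 and later must use the extended view (DBGDSCRext) at c0, c2, 2.
	 */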
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
	case ARM_DEBUG_ARCH_V7_ECP14:
	case ARM_DEBUG_ARCH_V7_1:
	case ARM_DEBUG_ARCH_V8:
	case ARM_DEBUG_ARCH_V8_1:
	case ARM_DEBUG_ARCH_V8_2:
	case ARM_DEBUG_ARCH_V8_4:
		ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
		isb();
		break;
	default:
		return -ENODEV;
	}

	/* Check that the write made it through. */
	ARM_DBG_READ(c0, c1, 0, dscr);
	if (!(dscr & ARM_DSCR_MDBGEN)) {
		pr_warn_once("Failed to enable monitor mode on CPU %d.\n",
			     smp_processor_id());
		return -EPERM;
	}

out:
	return 0;
}

int hw_breakpoint_slots(int type)
{
	if (!debug_arch_supported())
		return 0;

	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warn("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 */
static u8 get_max_wp_len(void)
{
	u32 ctrl_reg;
	struct arch_hw_breakpoint_ctrl ctrl;
	u8 size = 4;

	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

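	/*
	 * Probe by programming an 8-byte byte-address-select mask into WRP 0
	 * and checking whether it reads back intact.
	 */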
	write_wb_reg(ARM_BASE_WVR, 0);
	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
		size = 8;

out:
	return size;
}

u8 arch_get_max_wp_len(void)
{
	return max_watchpoint_len;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base;
	u32 addr, ctrl;

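	/* Bit 0 of the encoded control word is the enable bit; force it on. */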
	addr = info->address;
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		ctrl_base = ARM_BASE_WCR;
		val_base = ARM_BASE_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
	}

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (i == max_slots) {
		pr_warn("Can't find any breakpoint slot\n");
		return -EBUSY;
	}

	/* Override the breakpoint data with the step data. */
	if (info->step_ctrl.enabled) {
		addr = info->trigger & ~0x3;
		ctrl = encode_ctrl_reg(info->step_ctrl);
		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
			i = 0;
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
		}
	}

	/* Setup the address register. */
	write_wb_reg(val_base + i, addr);

	/* Setup the control register. */
	write_wb_reg(ctrl_base + i, ctrl);
	return 0;
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, base;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		base = ARM_BASE_WCR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Remove the breakpoint. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (i == max_slots) {
		pr_warn("Can't find any breakpoint slot\n");
		return;
	}

	/* Ensure that we disable the mismatch breakpoint. */
	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
	    info->step_ctrl.enabled) {
		i = 0;
		base = ARM_BASE_BCR + core_num_brps;
	}

	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned int len;
	unsigned long va;

	va = hw->address;
	len = get_hbp_len(hw->ctrl.len);

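	/* Both the first and last byte of the watched range must be above TASK_SIZE. */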
	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp,
			      const struct perf_event_attr *attr,
			      struct arch_hw_breakpoint *hw)
{
	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_X:
		hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		hw->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		hw->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (attr->bp_len) {
	case HW_BREAKPOINT_LEN_1:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
		if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
			&& max_watchpoint_len >= 8)
			break;
		fallthrough;
	default:
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	hw->address = attr->bp_addr;

	/* Privilege */
	hw->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(hw))
		hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */
	hw->ctrl.enabled = !attr->disabled;

	/* Mismatch */
	hw->ctrl.mismatch = 0;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = 0;
	u32 offset, alignment_mask = 0x3;

	/* Ensure that we are in monitor debug mode. */
	if (!monitor_mode_enabled())
		return -ENODEV;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp, attr, hw);
	if (ret)
		goto out;

	/* Check address alignment. */
	if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	offset = hw->address & alignment_mask;
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
		fallthrough;
	case 3:
		/* Allow single byte watchpoint. */
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
		fallthrough;
	default:
		ret = -EINVAL;
		goto out;
	}

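	/*
	 * Align the address down and shift the byte-address-select mask so
	 * that it still covers the bytes that were requested.
	 */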
	hw->address &= ~alignment_mask;
	hw->ctrl.len <<= offset;

	if (is_default_overflow_handler(bp)) {
		/*
		 * Mismatch breakpoints are required for single-stepping
		 * breakpoints.
		 */
		if (!core_has_mismatch_brps())
			return -EINVAL;

		/* We don't allow mismatch breakpoints in kernel space. */
		if (arch_check_bp_in_kernelspace(hw))
			return -EPERM;

		/*
		 * Per-cpu breakpoints are not supported by our stepping
		 * mechanism.
		 */
		if (!bp->hw.target)
			return -EINVAL;

		/*
		 * We only support specific access types if the fsr
		 * reports them.
		 */
		if (!debug_exception_updates_fsr() &&
		    (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
		     hw->ctrl.type == ARM_BREAKPOINT_STORE))
			return -EINVAL;
	}

out:
	return ret;
}

/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
 */
static void enable_single_step(struct perf_event *bp, u32 addr)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

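	/*
	 * Stepping is implemented with a mismatch breakpoint: re-install the
	 * event with step_ctrl set so that a debug exception is taken on the
	 * first instruction executed at an address other than 'addr'.
	 */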
	arch_uninstall_hw_breakpoint(bp);
	info->step_ctrl.mismatch = 1;
	info->step_ctrl.len = ARM_BREAKPOINT_LEN_4;
	info->step_ctrl.type = ARM_BREAKPOINT_EXECUTE;
	info->step_ctrl.privilege = info->ctrl.privilege;
	info->step_ctrl.enabled = 1;
	info->trigger = addr;
	arch_install_hw_breakpoint(bp);
}

static void disable_single_step(struct perf_event *bp)
{
	arch_uninstall_hw_breakpoint(bp);
	counter_arch_bp(bp)->step_ctrl.enabled = 0;
	arch_install_hw_breakpoint(bp);
}

/*
 * Arm32 hardware does not always report a watchpoint hit address that matches
 * one of the watchpoints set. It can also report an address "near" the
 * watchpoint if a single instruction accesses both watched and unwatched
 * addresses. There is no straightforward way, short of disassembling the
 * offending instruction, to map that address back to the watchpoint. This
 * function computes the distance of the memory access from the watchpoint as a
 * heuristic for the likelihood that a given access triggered the watchpoint.
 *
 * See this same function in the arm64 platform code, which has the same
 * problem.
 *
 * The function returns the distance of the address from the bytes watched by
 * the watchpoint. In case of an exact match, it returns 0.
 */
static u32 get_distance_from_watchpoint(unsigned long addr, u32 val,
					struct arch_hw_breakpoint_ctrl *ctrl)
{
	u32 wp_low, wp_high;
	u32 lens, lene;

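	/*
	 * ctrl->len is a byte-address-select bitmask, so its first and last
	 * set bits give the offsets of the lowest and highest watched bytes.
	 */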
	lens = __ffs(ctrl->len);
	lene = __fls(ctrl->len);

	wp_low = val + lens;
	wp_high = val + lene;
	if (addr < wp_low)
		return wp_low - addr;
	else if (addr > wp_high)
		return addr - wp_high;
	else
		return 0;
}

static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
				       struct arch_hw_breakpoint *info)
{
	return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
}

static void watchpoint_handler(unsigned long addr, unsigned int fsr,
			       struct pt_regs *regs)
{
	int i, access, closest_match = 0;
	u32 min_dist = -1, dist;
	u32 val, ctrl_reg;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);

	/*
	 * Find all watchpoints that match the reported address. If no exact
	 * match is found, attribute the hit to the closest watchpoint.
	 */
	rcu_read_lock();
	for (i = 0; i < core_num_wrps; ++i) {
		wp = slots[i];
		if (wp == NULL)
			continue;

		/*
		 * The DFAR is an unknown value on debug architectures prior
		 * to 7.1. Since we only allow a single watchpoint on these
		 * older CPUs, we can set the trigger to the lowest possible
		 * faulting address.
		 */
		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
			BUG_ON(i > 0);
			info = counter_arch_bp(wp);
			info->trigger = wp->attr.bp_addr;
		} else {
			/* Check that the access type matches. */
			if (debug_exception_updates_fsr()) {
				access = (fsr & ARM_FSR_ACCESS_MASK) ?
					  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
				if (!(access & hw_breakpoint_type(wp)))
					continue;
			}

			val = read_wb_reg(ARM_BASE_WVR + i);
			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
			decode_ctrl_reg(ctrl_reg, &ctrl);
			dist = get_distance_from_watchpoint(addr, val, &ctrl);
			if (dist < min_dist) {
				min_dist = dist;
				closest_match = i;
			}
			/* Is this an exact match? */
			if (dist != 0)
				continue;

			/* We have a winner. */
			info = counter_arch_bp(wp);
			info->trigger = addr;
		}

		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);

		/*
		 * If we triggered a user watchpoint from a uaccess routine,
		 * then handle the stepping ourselves since userspace really
		 * can't help us with this.
		 */
		if (watchpoint_fault_on_uaccess(regs, info))
			goto step;

		perf_bp_event(wp, regs);

		/*
		 * Defer stepping to the overflow handler if one is installed.
		 * Otherwise, insert a temporary mismatch breakpoint so that
		 * we can single-step over the watchpoint trigger.
		 */
		if (!is_default_overflow_handler(wp))
			continue;
step:
		enable_single_step(wp, instruction_pointer(regs));
	}

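	/*
	 * min_dist is still -1 (UINT_MAX) if no distance was computed above;
	 * 0 means an exact match was already handled in the loop.
	 */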
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (min_dist > 0 && min_dist != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /* No exact match found. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) wp = slots[closest_match];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) info = counter_arch_bp(wp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) info->trigger = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) perf_bp_event(wp, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (is_default_overflow_handler(wp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) enable_single_step(wp, instruction_pointer(regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static void watchpoint_single_step_handler(unsigned long pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct perf_event *wp, **slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct arch_hw_breakpoint *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) slots = this_cpu_ptr(wp_on_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) for (i = 0; i < core_num_wrps; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) wp = slots[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (wp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) info = counter_arch_bp(wp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (!info->step_ctrl.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * Restore the original watchpoint if we've completed the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * single-step.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (info->trigger != pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) disable_single_step(wp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
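/*
 * Breakpoint (prefetch abort) handler. Reports any installed breakpoint
 * whose address and byte-address-select cover the faulting PC via perf
 * and, when the default overflow handler is in use, single-steps over it
 * using a mismatch breakpoint. Slots that were being stepped but no
 * longer match have their original breakpoint restored, and any pending
 * watchpoint single-step state is resolved afterwards.
 */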
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) u32 ctrl_reg, val, addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct perf_event *bp, **slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct arch_hw_breakpoint *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct arch_hw_breakpoint_ctrl ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) slots = this_cpu_ptr(bp_on_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* The exception entry code places the amended lr in the PC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) addr = regs->ARM_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Check the currently installed breakpoints first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) for (i = 0; i < core_num_brps; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) bp = slots[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (bp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) info = counter_arch_bp(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* Check if the breakpoint value matches. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) val = read_wb_reg(ARM_BASE_BVR + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (val != (addr & ~0x3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) goto mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* Possible match, check the byte address select to confirm. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) decode_ctrl_reg(ctrl_reg, &ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if ((1 << (addr & 0x3)) & ctrl.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) info->trigger = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) pr_debug("breakpoint fired: address = 0x%x\n", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) perf_bp_event(bp, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (is_default_overflow_handler(bp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) enable_single_step(bp, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) mismatch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* If we're stepping a breakpoint, it can now be restored. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (info->step_ctrl.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) disable_single_step(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* Handle any pending watchpoint single-step breakpoints. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) watchpoint_single_step_handler(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * Called from either the Data Abort Handler [watchpoint] or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * Prefetch Abort Handler [breakpoint] with interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) u32 dscr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (interrupts_enabled(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 	/* Read the DSCR; we only handle watchpoint and hardware breakpoint events. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ARM_DBG_READ(c0, c1, 0, dscr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* Perform perf callbacks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) switch (ARM_DSCR_MOE(dscr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) case ARM_ENTRY_BREAKPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) breakpoint_handler(addr, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) case ARM_ENTRY_ASYNC_WATCHPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) case ARM_ENTRY_SYNC_WATCHPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) watchpoint_handler(addr, fsr, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) ret = 1; /* Unhandled fault. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * One-time initialisation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static cpumask_t debug_err_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) pr_warn("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) instr, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* Set the error flag for this CPU and skip the faulting instruction. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) cpumask_set_cpu(cpu, &debug_err_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) instruction_pointer(regs) += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
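/*
 * Undefined-instruction hook matching CP14 MRC/MCR encodings. If the
 * debug registers are inaccessible (e.g. debug logic powered down or
 * DBGSWENABLE driven low), probing them traps here and the failure is
 * recorded in debug_err_mask instead of being treated as a fatal
 * undefined instruction.
 */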
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) static struct undef_hook debug_reg_hook = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) .instr_mask = 0x0fe80f10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) .instr_val = 0x0e000e10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) .fn = debug_reg_trap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* Does this core support OS Save and Restore? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static bool core_has_os_save_restore(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) u32 oslsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) switch (get_debug_arch()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) case ARM_DEBUG_ARCH_V7_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) case ARM_DEBUG_ARCH_V7_ECP14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) ARM_DBG_READ(c1, c1, 4, oslsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (oslsr & ARM_OSLSR_OSLM0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static void reset_ctrl_regs(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) int i, raw_num_brps, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * v7 debug contains save and restore registers so that debug state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * can be maintained across low-power modes without leaving the debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * the debug registers out of reset, so we must unlock the OS Lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * Access Register to avoid taking undefined instruction exceptions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * later on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) switch (debug_arch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) case ARM_DEBUG_ARCH_V6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) case ARM_DEBUG_ARCH_V6_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /* ARMv6 cores clear the registers out of reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) goto out_mdbgen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) case ARM_DEBUG_ARCH_V7_ECP14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * Ensure sticky power-down is clear (i.e. debug logic is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * powered up).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ARM_DBG_READ(c1, c5, 4, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if ((val & 0x1) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (!has_ossr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) goto clear_vcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) case ARM_DEBUG_ARCH_V7_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * Ensure the OS double lock is clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ARM_DBG_READ(c1, c3, 4, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if ((val & 0x1) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) pr_warn_once("CPU %d debug is powered down!\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * Unconditionally clear the OS lock by writing a value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	 * other than CORESIGHT_UNLOCK to the OS Lock Access Register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ARM_DBG_WRITE(c1, c0, 4, ~CORESIGHT_UNLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) isb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * Clear any configured vector-catch events before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * enabling monitor mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) clear_vcr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) ARM_DBG_WRITE(c0, c7, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) isb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * The control/value register pairs are UNKNOWN out of reset so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * clear them to avoid spurious debug events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) raw_num_brps = get_num_brp_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) for (i = 0; i < raw_num_brps; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) write_wb_reg(ARM_BASE_BCR + i, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) write_wb_reg(ARM_BASE_BVR + i, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) for (i = 0; i < core_num_wrps; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) write_wb_reg(ARM_BASE_WCR + i, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) write_wb_reg(ARM_BASE_WVR + i, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * Have a crack at enabling monitor mode. We don't actually need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * it yet, but reporting an error early is useful if it fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) out_mdbgen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (enable_monitor_mode())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
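/*
 * cpuhp startup callback: runs on each CPU as it comes online so that
 * every core starts with monitor mode enabled and its breakpoint and
 * watchpoint registers in a known, cleared state.
 */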
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static int dbg_reset_online(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) reset_ctrl_regs(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) #ifdef CONFIG_CPU_PM
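/*
 * Debug register state may be lost while a core is in a low-power state,
 * so re-run reset_ctrl_regs() when the core resumes (CPU_PM_EXIT) to
 * re-enable monitor mode and clear the breakpoint/watchpoint registers.
 */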
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (action == CPU_PM_EXIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) reset_ctrl_regs(smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static struct notifier_block dbg_cpu_pm_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) .notifier_call = dbg_cpu_pm_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) static void __init pm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) cpu_pm_register_notifier(&dbg_cpu_pm_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static inline void pm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static int __init arch_hw_breakpoint_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) debug_arch = get_debug_arch();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (!debug_arch_supported()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * whenever a WFI is issued, even if the core is not powered down, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * violation of the architecture. When DBGPRSR.SPD is set, accesses to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * breakpoint and watchpoint registers are treated as undefined, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * this results in boot time and runtime failures when these are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * accessed and we unexpectedly take a trap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * It's not clear if/how this can be worked around, so we blacklist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * Scorpion CPUs to avoid these issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) has_ossr = core_has_os_save_restore();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /* Determine how many BRPs/WRPs are available. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) core_num_brps = get_num_brps();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) core_num_wrps = get_num_wrps();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * We need to tread carefully here because DBGSWENABLE may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * driven low on this core and there isn't an architected way to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * determine that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) register_undef_hook(&debug_reg_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * Register CPU notifier which resets the breakpoint resources. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * assume that a halting debugger will leave the world in a nice state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) "arm/hw_breakpoint:online",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) dbg_reset_online, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) unregister_undef_hook(&debug_reg_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) core_num_brps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) core_num_wrps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) cpuhp_remove_state_nocalls_cpuslocked(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	pr_info("found %d %sbreakpoint and %d watchpoint registers.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		core_num_brps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		core_has_mismatch_brps() ? "(+1 reserved) " : "", core_num_wrps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /* Work out the maximum supported watchpoint length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) max_watchpoint_len = get_max_wp_len();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) pr_info("maximum watchpoint size is %u bytes.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) max_watchpoint_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /* Register debug fault handler. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) TRAP_HWBKPT, "watchpoint debug exception");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) TRAP_HWBKPT, "breakpoint debug exception");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* Register PM notifiers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) pm_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) arch_initcall(arch_hw_breakpoint_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) void hw_breakpoint_pmu_read(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * Dummy function to register with die_notifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) unsigned long val, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
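
/*
 * Illustrative sketch only (not part of this file): a user-space consumer
 * of this facility typically requests a watchpoint through the perf API.
 * The names watched_var and fd below are examples, not kernel symbols.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_BREAKPOINT,
 *		.size		= sizeof(attr),
 *		.bp_type	= HW_BREAKPOINT_W,
 *		.bp_addr	= (__u64)(unsigned long)&watched_var,
 *		.bp_len		= HW_BREAKPOINT_LEN_4,
 *		.sample_period	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * A write to watched_var then raises the debug exception that is routed
 * through hw_breakpoint_pending() and handled by watchpoint_handler().
 */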