/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES,
};

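/*
 * For illustration: with the mainline lockdep_states.h, which lists
 * HARDIRQ and SOFTIRQ, the x-macro above expands to
 *
 *	LOCK_USED_IN_HARDIRQ, LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ, LOCK_ENABLED_HARDIRQ_READ,
 *	LOCK_USED_IN_SOFTIRQ, LOCK_USED_IN_SOFTIRQ_READ,
 *	LOCK_ENABLED_SOFTIRQ, LOCK_ENABLED_SOFTIRQ_READ,
 *
 * followed by LOCK_USED, LOCK_USED_READ and the LOCK_USAGE_STATES count.
 */
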
/* usage states after LOCK_USED_READ are neither traced nor printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))

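/*
 * Given the enum layout above, each usage bit encodes three fields:
 * bit 0 is the READ bit, bit 1 is the direction (USED_IN vs. ENABLED),
 * and the remaining bits select the IRQ state. For example,
 * LOCK_ENABLED_HARDIRQ_READ (== 3) has both the READ and DIR bits set.
 */
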
/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
	__LOCKF(USED_READ)
};

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

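/*
 * Note the idiom used by this and the three initializers below: each
 * LOCKDEP_STATE() expansion ends in '|', so the trailing 0 closes the
 * expression. With the mainline state list, this one evaluates to
 * LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ.
 */
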
#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL	(LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL	(LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ	(LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ	(LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within its 32MB kernel size limit. With
 * CONFIG_LOCKDEP we could exceed this limit and cause boot problems.
 * So, reduce the static allocations for the lockdep-related structures
 * so that everything fits within the current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to the dependency table of each
 * currently held lock (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#define STACK_TRACE_HASH_SIZE	16384
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS*5)
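
/*
 * The *5 above sizes the held-lock pool for an assumed average of five
 * held locks per lock chain; it is a sizing heuristic, not a hard
 * per-chain limit.
 */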

extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

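/*
 * Global counters, exposed here mainly for the /proc/lockdep*
 * reporting code (lockdep_proc.c):
 */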
extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;
extern unsigned long max_lock_class_idx;

extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
extern unsigned long lock_classes_in_use[];

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-CPU, as they are often accessed in the fast path
 * and we want to avoid too much cache-line bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

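/*
 * The raw __this_cpu_inc()/__this_cpu_dec() below are only safe when
 * the task cannot migrate or be preempted mid-update, hence the
 * WARN_ON_ONCE(!irqs_disabled()) check in both variants.
 */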
#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
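
/*
 * Illustrative use (as in lockdep.c): debug_atomic_inc(chain_lookup_hits)
 * bumps this CPU's slot, while debug_atomic_read(chain_lookup_hits)
 * sums that field over all possible CPUs, giving readers a cheap,
 * approximately consistent global total.
 */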

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	/* index of this class in the global lock_classes[] array */
	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	/* sum this class's op count over all possible CPUs */
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif