/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES,
};
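
/*
 * Illustration: assuming lockdep_states.h lists LOCKDEP_STATE(HARDIRQ)
 * and LOCKDEP_STATE(SOFTIRQ), as current kernels do, the X-macro above
 * unrolls to:
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 *	LOCK_USED_IN_SOFTIRQ,
 *	...
 *
 * i.e. each state in lockdep_states.h contributes four usage bits.
 */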

/* states after LOCK_USED_READ are not traced and printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
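
/*
 * A usage bit thus decomposes as <state>|<dir>|<read>: bit 0 selects
 * the _READ variant, bit 1 distinguishes ENABLED_* from USED_IN_*, and
 * the higher bits index the irq state generated above.
 */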

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
	__LOCKF(USED_READ)
};
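
/*
 * This generates one LOCKF_* bit per usage bit above, e.g.
 * LOCKF_USED_IN_HARDIRQ == (1 << LOCK_USED_IN_HARDIRQ), so whole
 * groups of usage bits can be tested with a single mask operation.
 */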

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE
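
/*
 * Each block above (re)uses #include as a code generator: with the
 * HARDIRQ and SOFTIRQ states, the first one expands to
 *
 *	static const unsigned long LOCKF_ENABLED_IRQ =
 *		LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0;
 *
 * where the trailing "| 0" terminates the OR chain left behind by the
 * LOCKDEP_STATE() macro.
 */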

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)
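
/*
 * The *_ALL masks cover both the write and _READ variants of one
 * direction (ENABLED or USED_IN), while LOCKF_IRQ/LOCKF_IRQ_READ group
 * the same bits by write vs. read usage instead; these composites are
 * what the irq-safety checking code tests dependencies against.
 */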

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in required 32MB limit for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up
 * problems. So, reduce the static allocations for lockdep related
 * structures so that everything fits in current required size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow it by adding
 * every to-be-taken lock to all currently held locks' own dependency
 * table (if it's not there yet), and we check it for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#define STACK_TRACE_HASH_SIZE	16384
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)

extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)
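
/*
 * get_usage_chars() appears to emit two characters per state in
 * lockdep_states.h plus a terminating NUL, which is where the
 * 2*states + 1 sizing comes from.
 */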

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;
extern unsigned long max_lock_class_idx;

extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
extern unsigned long lock_classes_in_use[];

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

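/*
 * debug_atomic_inc()/debug_atomic_dec() below use the non-atomic
 * __this_cpu ops, which are only safe if no interrupt can update the
 * same counter concurrently; the WARN_ON_ONCE(!irqs_disabled())
 * polices that requirement.
 */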
#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

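/*
 * debug_atomic_read() sums the counter over all possible CPUs with no
 * locking, so concurrent updates can make the result an approximate
 * snapshot; that is good enough for the statistics reporting it backs.
 */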
#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})

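/*
 * class - lock_classes is plain pointer arithmetic: it yields the
 * class's index in the static lock_classes[] array, which doubles as
 * its slot in the per-cpu lock_class_ops[] counters.
 */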
static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

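/*
 * The read side mirrors debug_atomic_read(): walk every possible CPU
 * and sum the per-cpu, per-class counters without synchronization.
 */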
static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif