// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>

#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#define IRQ_MATRIX_SIZE	(BITS_TO_LONGS(IRQ_MATRIX_BITS))

struct cpumap {
	unsigned int		available;
	unsigned int		allocated;
	unsigned int		managed;
	unsigned int		managed_allocated;
	bool			initialized;
	bool			online;
	unsigned long		alloc_map[IRQ_MATRIX_SIZE];
	unsigned long		managed_map[IRQ_MATRIX_SIZE];
};

struct irq_matrix {
	unsigned int		matrix_bits;
	unsigned int		alloc_start;
	unsigned int		alloc_end;
	unsigned int		alloc_size;
	unsigned int		global_available;
	unsigned int		global_reserved;
	unsigned int		systembits_inalloc;
	unsigned int		total_allocated;
	unsigned int		online_maps;
	struct cpumap __percpu	*maps;
	unsigned long		scratch_map[IRQ_MATRIX_SIZE];
	unsigned long		system_map[IRQ_MATRIX_SIZE];
};

#define CREATE_TRACE_POINTS
#include <trace/events/irq_matrix.h>

/**
 * irq_alloc_matrix - Allocate an irq_matrix structure and initialize it
 * @matrix_bits:	Number of matrix bits, must be <= IRQ_MATRIX_BITS
 * @alloc_start:	From which bit the allocation search starts
 * @alloc_end:		At which bit the allocation search ends, i.e. the
 *			first invalid bit
 */
__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
					   unsigned int alloc_start,
					   unsigned int alloc_end)
{
	struct irq_matrix *m;

	if (matrix_bits > IRQ_MATRIX_BITS)
		return NULL;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return NULL;

	m->matrix_bits = matrix_bits;
	m->alloc_start = alloc_start;
	m->alloc_end = alloc_end;
	m->alloc_size = alloc_end - alloc_start;
	m->maps = alloc_percpu(*m->maps);
	if (!m->maps) {
		kfree(m);
		return NULL;
	}
	return m;
}
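
/*
 * Usage sketch (illustrative, not part of this file): an architecture's
 * vector management code would typically allocate the matrix once during
 * early boot, roughly along the lines of:
 *
 *	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
 *					 FIRST_SYSTEM_VECTOR);
 *	if (!vector_matrix)
 *		// handle the allocation failure, e.g. BUG()
 *
 * NR_VECTORS, FIRST_EXTERNAL_VECTOR and FIRST_SYSTEM_VECTOR are the x86
 * vector space names; other users would pass their own vector space
 * boundaries.
 */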

/**
 * irq_matrix_online - Bring the local CPU matrix online
 * @m:	Matrix pointer
 */
void irq_matrix_online(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	BUG_ON(cm->online);

	if (!cm->initialized) {
		cm->available = m->alloc_size;
		cm->available -= cm->managed + m->systembits_inalloc;
		cm->initialized = true;
	}
	m->global_available += cm->available;
	cm->online = true;
	m->online_maps++;
	trace_irq_matrix_online(m);
}

/**
 * irq_matrix_offline - Bring the local CPU matrix offline
 * @m:	Matrix pointer
 */
void irq_matrix_offline(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* Update the global available size */
	m->global_available -= cm->available;
	cm->online = false;
	m->online_maps--;
	trace_irq_matrix_offline(m);
}

static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
				      unsigned int num, bool managed)
{
	unsigned int area, start = m->alloc_start;
	unsigned int end = m->alloc_end;

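	/*
	 * Search in the union of the system map, this CPU's managed map and
	 * this CPU's allocation map: a zero area in that union is free for
	 * either a managed reservation or a regular allocation.
	 */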
	bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
	bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
	area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
	if (area >= end)
		return area;
	if (managed)
		bitmap_set(cm->managed_map, area, num);
	else
		bitmap_set(cm->alloc_map, area, num);
	return area;
}

/* Find the best CPU which has the lowest vector allocation count */
static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
					 const struct cpumask *msk)
{
	unsigned int cpu, best_cpu, maxavl = 0;
	struct cpumap *cm;

	best_cpu = UINT_MAX;

	for_each_cpu(cpu, msk) {
		cm = per_cpu_ptr(m->maps, cpu);

		if (!cm->online || cm->available <= maxavl)
			continue;

		best_cpu = cpu;
		maxavl = cm->available;
	}
	return best_cpu;
}

/* Find the best CPU which has the lowest number of managed IRQs allocated */
static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
						 const struct cpumask *msk)
{
	unsigned int cpu, best_cpu, allocated = UINT_MAX;
	struct cpumap *cm;

	best_cpu = UINT_MAX;

	for_each_cpu(cpu, msk) {
		cm = per_cpu_ptr(m->maps, cpu);

		if (!cm->online || cm->managed_allocated > allocated)
			continue;

		best_cpu = cpu;
		allocated = cm->managed_allocated;
	}
	return best_cpu;
}

/**
 * irq_matrix_assign_system - Assign system wide entry in the matrix
 * @m:		Matrix pointer
 * @bit:	Which bit to reserve
 * @replace:	Replace an already allocated vector with a system
 *		vector at the same bit position.
 *
 * The BUG_ON()s below are on purpose. If this goes wrong in the
 * early boot process, then the chance to survive is about zero.
 * If this happens when the system is live, it's not much better.
 */
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
			      bool replace)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	BUG_ON(bit > m->matrix_bits);
	BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));

	set_bit(bit, m->system_map);
	if (replace) {
		BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
		cm->allocated--;
		m->total_allocated--;
	}
	if (bit >= m->alloc_start && bit < m->alloc_end)
		m->systembits_inalloc++;

	trace_irq_matrix_assign_system(bit, m);
}

/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be reserved.
 *
 * Can be called for offline CPUs. Note, this will only reserve one bit
 * on all CPUs in @msk, but it's not guaranteed that the bits are at the
 * same offset on all CPUs.
 */
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu, failed_cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		bit = matrix_alloc_area(m, cm, 1, true);
		if (bit >= m->alloc_end)
			goto cleanup;
		cm->managed++;
		if (cm->online) {
			cm->available--;
			m->global_available--;
		}
		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
	}
	return 0;
cleanup:
	failed_cpu = cpu;
	for_each_cpu(cpu, msk) {
		if (cpu == failed_cpu)
			break;
		irq_matrix_remove_managed(m, cpumask_of(cpu));
	}
	return -ENOSPC;
}
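
/*
 * Illustrative life cycle of a managed interrupt on the caller side,
 * sketched from the kernel-doc in this file (not a verbatim caller):
 *
 *	irq_matrix_reserve_managed(m, mask);		at descriptor setup
 *	bit = irq_matrix_alloc_managed(m, mask, &cpu);	at activation
 *	irq_matrix_free(m, cpu, bit, true);		at deactivation
 *	irq_matrix_remove_managed(m, mask);		at descriptor teardown
 */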

/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs.
 *
 * This removes non-allocated managed interrupts from the map. It does
 * not matter which one because the managed interrupts free their
 * allocation when they shut down. If not, the accounting is screwed,
 * but all that can be done at this point is to warn about it.
 */
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit, end = m->alloc_end;

		if (WARN_ON_ONCE(!cm->managed))
			continue;

		/* Get the managed bits which are not allocated */
		bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

		bit = find_first_bit(m->scratch_map, end);
		if (WARN_ON_ONCE(bit >= end))
			continue;

		clear_bit(bit, cm->managed_map);

		cm->managed--;
		if (cm->online) {
			cm->available++;
			m->global_available++;
		}
		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
	}
}

/**
 * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	Which CPUs to search in
 * @mapped_cpu:	Pointer to store the CPU for which the interrupt was allocated
 */
int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
			     unsigned int *mapped_cpu)
{
	unsigned int bit, cpu, end = m->alloc_end;
	struct cpumap *cm;

	if (cpumask_empty(msk))
		return -EINVAL;

	cpu = matrix_find_best_cpu_managed(m, msk);
	if (cpu == UINT_MAX)
		return -ENOSPC;

	cm = per_cpu_ptr(m->maps, cpu);
	/* Get the managed bits which are not allocated */
	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
	bit = find_first_bit(m->scratch_map, end);
	if (bit >= end)
		return -ENOSPC;
	set_bit(bit, cm->alloc_map);
	cm->allocated++;
	cm->managed_allocated++;
	m->total_allocated++;
	*mapped_cpu = cpu;
	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
	return bit;
}

/**
 * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
 * @m:		Matrix pointer
 * @bit:	Which bit to mark
 *
 * This should only be used to mark preallocated vectors.
 */
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;
	if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
		return;
	cm->allocated++;
	m->total_allocated++;
	cm->available--;
	m->global_available--;
	trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
}

/**
 * irq_matrix_reserve - Reserve interrupts
 * @m:	Matrix pointer
 *
 * This is merely a bookkeeping call. It increments the number of globally
 * reserved interrupt bits w/o actually allocating them. This allows
 * interrupt descriptors to be set up w/o assigning low level resources to
 * them. The actual allocation happens when the interrupt gets activated.
 */
void irq_matrix_reserve(struct irq_matrix *m)
{
	if (m->global_reserved <= m->global_available &&
	    m->global_reserved + 1 > m->global_available)
		pr_warn("Interrupt reservation exceeds available resources\n");

	m->global_reserved++;
	trace_irq_matrix_reserve(m);
}
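
/*
 * Note (summarizing the kernel-doc above): a reservation made here is
 * consumed either by irq_matrix_alloc() with reserved == true when the
 * interrupt is activated, or dropped via irq_matrix_remove_reserved() if
 * the interrupt is torn down without ever having been activated.
 */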

/**
 * irq_matrix_remove_reserved - Remove interrupt reservation
 * @m:	Matrix pointer
 *
 * This is merely a bookkeeping call. It decrements the number of globally
 * reserved interrupt bits. This is used to undo irq_matrix_reserve() when
 * the interrupt was never in use and no real vector was allocated (a real
 * allocation would already have undone the reservation).
 */
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
	m->global_reserved--;
	trace_irq_matrix_remove_reserved(m);
}

/**
 * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	Which CPUs to search in
 * @reserved:	Allocate previously reserved interrupts
 * @mapped_cpu:	Pointer to store the CPU for which the irq was allocated
 */
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
		     bool reserved, unsigned int *mapped_cpu)
{
	unsigned int cpu, bit;
	struct cpumap *cm;

	/*
	 * Not required in theory, but matrix_find_best_cpu() uses
	 * for_each_cpu() which ignores the cpumask on UP.
	 */
	if (cpumask_empty(msk))
		return -EINVAL;

	cpu = matrix_find_best_cpu(m, msk);
	if (cpu == UINT_MAX)
		return -ENOSPC;

	cm = per_cpu_ptr(m->maps, cpu);
	bit = matrix_alloc_area(m, cm, 1, false);
	if (bit >= m->alloc_end)
		return -ENOSPC;
	cm->allocated++;
	cm->available--;
	m->total_allocated++;
	m->global_available--;
	if (reserved)
		m->global_reserved--;
	*mapped_cpu = cpu;
	trace_irq_matrix_alloc(bit, cpu, m, cm);
	return bit;
}
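
/*
 * Usage sketch (illustrative): a caller activating a non-managed interrupt
 * would do something along the lines of:
 *
 *	unsigned int cpu;
 *	int bit;
 *
 *	bit = irq_matrix_alloc(m, cpu_online_mask, false, &cpu);
 *	if (bit < 0)
 *		return bit;
 *	...
 *	irq_matrix_free(m, cpu, bit, false);	when the interrupt goes away
 */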

/**
 * irq_matrix_free - Free an allocated interrupt in the matrix
 * @m:		Matrix pointer
 * @cpu:	Which CPU map needs to be updated
 * @bit:	The bit to remove
 * @managed:	If true, the interrupt is managed and not accounted
 *		as available.
 */
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
		     unsigned int bit, bool managed)
{
	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;

	if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
		return;

	cm->allocated--;
	if (managed)
		cm->managed_allocated--;

	if (cm->online)
		m->total_allocated--;

	if (!managed) {
		cm->available++;
		if (cm->online)
			m->global_available++;
	}
	trace_irq_matrix_free(bit, cpu, m, cm);
}

/**
 * irq_matrix_available - Get the number of globally available irqs
 * @m:		Pointer to the matrix to query
 * @cpudown:	If true, the local CPU is about to go down, adjust
 *		the number of available irqs accordingly
 */
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	if (!cpudown)
		return m->global_available;
	return m->global_available - cm->available;
}

/**
 * irq_matrix_reserved - Get the number of globally reserved irqs
 * @m:	Pointer to the matrix to query
 */
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
	return m->global_reserved;
}

/**
 * irq_matrix_allocated - Get the number of allocated irqs on the local CPU
 * @m:	Pointer to the matrix to search
 *
 * This returns the number of allocated irqs on the local CPU.
 */
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	return cm->allocated;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf:		Pointer to the seq_file to print to
 * @m:		Pointer to the matrix allocator
 * @ind:	Indentation for the print format
 *
 * Note, this is a lockless snapshot.
 */
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
	unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
	int cpu;

	seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps);
	seq_printf(sf, "Global available: %6u\n", m->global_available);
	seq_printf(sf, "Global reserved: %6u\n", m->global_reserved);
	seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
		   m->system_map);
	seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

		seq_printf(sf, "%*s %4d %4u %4u %4u %4u %*pbl\n", ind, " ",
			   cpu, cm->available, cm->managed,
			   cm->managed_allocated, cm->allocated,
			   m->matrix_bits, cm->alloc_map);
	}
	cpus_read_unlock();
}
#endif