// SPDX-License-Identifier: GPL-2.0-only
/*
 * cpu_rmap.c: CPU affinity reverse-map support
 * Copyright 2011 Solarflare Communications Inc.
 */

#include <linux/cpu_rmap.h>
#include <linux/interrupt.h>
#include <linux/export.h>

/*
 * These functions maintain a mapping from CPUs to some ordered set of
 * objects with CPU affinities. This can be seen as a reverse-map of
 * CPU affinity. However, we do not assume that the object affinities
 * cover all CPUs in the system. For those CPUs not directly covered
 * by object affinities, we attempt to find a nearest object based on
 * CPU topology.
 */
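
/*
 * As an illustrative sketch (the numbers are hypothetical): with two
 * objects whose affinities are {CPU 0} and {CPU 8}, those two CPUs sit
 * at distance 0, and every other CPU inherits the index of whichever
 * of the two is topologically nearest, via the SMT sibling, package
 * and NUMA node masks consulted in cpu_rmap_update() below.
 */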

/**
 * alloc_cpu_rmap - allocate CPU affinity reverse-map
 * @size: Number of objects to be mapped
 * @flags: Allocation flags e.g. %GFP_KERNEL
 *
 * Return: a new map with one reference held, or %NULL if @size exceeds
 * the u16 index range or the allocation fails.
 */
struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
{
	struct cpu_rmap *rmap;
	unsigned int cpu;
	size_t obj_offset;

	/* This is a silly number of objects, and we use u16 indices. */
	if (size > 0xffff)
		return NULL;

	/* Offset of object pointer array from base structure */
	obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
			   sizeof(void *));

	rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
	if (!rmap)
		return NULL;

	kref_init(&rmap->refcount);
	rmap->obj = (void **)((char *)rmap + obj_offset);

	/* Initially assign CPUs to objects on a rota, since we have
	 * no idea where the objects are. Use infinite distance, so
	 * any object with known distance is preferable. Include the
	 * CPUs that are not present/online, since we definitely want
	 * any newly-hotplugged CPUs to have some object assigned.
	 */
	for_each_possible_cpu(cpu) {
		rmap->near[cpu].index = cpu % size;
		rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
	}

	rmap->size = size;
	return rmap;
}
EXPORT_SYMBOL(alloc_cpu_rmap);
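
/*
 * Minimal usage sketch (names such as "nr_rx_queues" are hypothetical,
 * not part of this API). The reference taken by kref_init() above is
 * dropped with cpu_rmap_put() once the map is no longer needed:
 *
 *	struct cpu_rmap *rmap = alloc_cpu_rmap(nr_rx_queues, GFP_KERNEL);
 *
 *	if (!rmap)
 *		return -ENOMEM;
 *	...
 *	cpu_rmap_put(rmap);
 */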

/**
 * cpu_rmap_release - internal reclaiming helper called from kref_put
 * @ref: kref to struct cpu_rmap
 */
static void cpu_rmap_release(struct kref *ref)
{
	struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);

	kfree(rmap);
}

/**
 * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
 * @rmap: reverse-map allocated with alloc_cpu_rmap()
 */
static inline void cpu_rmap_get(struct cpu_rmap *rmap)
{
	kref_get(&rmap->refcount);
}

/**
 * cpu_rmap_put - release ref on a cpu_rmap
 * @rmap: reverse-map allocated with alloc_cpu_rmap()
 *
 * Return: %1 if this was the last reference and @rmap was freed,
 * %0 otherwise (as returned by kref_put()).
 */
int cpu_rmap_put(struct cpu_rmap *rmap)
{
	return kref_put(&rmap->refcount, cpu_rmap_release);
}
EXPORT_SYMBOL(cpu_rmap_put);

/* Reevaluate nearest object for given CPU, comparing with the given
 * neighbours at the given distance.
 */
static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
				const struct cpumask *mask, u16 dist)
{
	int neigh;

	for_each_cpu(neigh, mask) {
		if (rmap->near[cpu].dist > dist &&
		    rmap->near[neigh].dist <= dist) {
			rmap->near[cpu].index = rmap->near[neigh].index;
			rmap->near[cpu].dist = dist;
			return true;
		}
	}
	return false;
}
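
/*
 * Worked example: if CPU 3 is currently at CPU_RMAP_DIST_INF and its
 * SMT sibling CPU 2 holds an object at distance 0, a pass over the
 * sibling mask with dist == 1 copies CPU 2's index to CPU 3 and
 * records distance 1 for it.
 */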

#ifdef DEBUG
static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
{
	unsigned int index;
	unsigned int cpu;

	pr_info("cpu_rmap %p, %s:\n", rmap, prefix);

	for_each_possible_cpu(cpu) {
		index = rmap->near[cpu].index;
		pr_info("cpu %u -> obj %u (distance %u)\n",
			cpu, index, rmap->near[cpu].dist);
	}
}
#else
static inline void
debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
{
}
#endif

/**
 * cpu_rmap_add - add object to a rmap
 * @rmap: CPU rmap allocated with alloc_cpu_rmap()
 * @obj: Object to add to rmap
 *
 * Return: index of object.
 */
int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
{
	u16 index;

	BUG_ON(rmap->used >= rmap->size);
	index = rmap->used++;
	rmap->obj[index] = obj;
	return index;
}
EXPORT_SYMBOL(cpu_rmap_add);
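
/*
 * Usage sketch (the per-queue object and its "rmap_index" field are
 * hypothetical): callers typically store the returned index so they
 * can pass it to cpu_rmap_update() when the object's affinity changes:
 *
 *	queue->rmap_index = cpu_rmap_add(rmap, queue);
 */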

/**
 * cpu_rmap_update - update CPU rmap following a change of object affinity
 * @rmap: CPU rmap to update
 * @index: Index of object whose affinity changed
 * @affinity: New CPU affinity of object
 *
 * Return: %0 on success, or -ENOMEM if a temporary cpumask cannot be
 * allocated.
 */
int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
		    const struct cpumask *affinity)
{
	cpumask_var_t update_mask;
	unsigned int cpu;

	if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
		return -ENOMEM;

	/* Invalidate distance for all CPUs for which this used to be
	 * the nearest object. Mark those CPUs for update.
	 */
	for_each_online_cpu(cpu) {
		if (rmap->near[cpu].index == index) {
			rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
			cpumask_set_cpu(cpu, update_mask);
		}
	}

	debug_print_rmap(rmap, "after invalidating old distances");

	/* Set distance to 0 for all CPUs in the new affinity mask.
	 * Mark all CPUs within their NUMA nodes for update.
	 */
	for_each_cpu(cpu, affinity) {
		rmap->near[cpu].index = index;
		rmap->near[cpu].dist = 0;
		cpumask_or(update_mask, update_mask,
			   cpumask_of_node(cpu_to_node(cpu)));
	}

	debug_print_rmap(rmap, "after updating neighbours");

	/* Update distances based on topology */
	for_each_cpu(cpu, update_mask) {
		if (cpu_rmap_copy_neigh(rmap, cpu,
					topology_sibling_cpumask(cpu), 1))
			continue;
		if (cpu_rmap_copy_neigh(rmap, cpu,
					topology_core_cpumask(cpu), 2))
			continue;
		if (cpu_rmap_copy_neigh(rmap, cpu,
					cpumask_of_node(cpu_to_node(cpu)), 3))
			continue;
		/* We could continue into NUMA node distances, but for now
		 * we give up.
		 */
	}

	debug_print_rmap(rmap, "after copying neighbours");

	free_cpumask_var(update_mask);
	return 0;
}
EXPORT_SYMBOL(cpu_rmap_update);
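
/*
 * A worked example of the distance ladder above: if an object's new
 * affinity is {CPU 4}, CPU 4 ends at distance 0, its SMT siblings at
 * distance 1, the rest of its package at distance 2 and the rest of
 * its NUMA node at distance 3; CPUs outside the marked update mask
 * keep whatever nearest object they already had.
 */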

/* Glue between IRQ affinity notifiers and CPU rmaps */

struct irq_glue {
	struct irq_affinity_notify notify;
	struct cpu_rmap *rmap;
	u16 index;
};

/**
 * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
 * @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
 *
 * Must be called in process context, before freeing the IRQs.
 */
void free_irq_cpu_rmap(struct cpu_rmap *rmap)
{
	struct irq_glue *glue;
	u16 index;

	if (!rmap)
		return;

	for (index = 0; index < rmap->used; index++) {
		glue = rmap->obj[index];
		/* Entries are NULLed if irq_cpu_rmap_add() failed part-way */
		if (glue)
			irq_set_affinity_notifier(glue->notify.irq, NULL);
	}

	cpu_rmap_put(rmap);
}
EXPORT_SYMBOL(free_irq_cpu_rmap);
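
/*
 * Teardown sketch (the "dev" cookie is hypothetical); the ordering is
 * what matters, since the notifiers must be removed before the IRQs
 * are freed:
 *
 *	free_irq_cpu_rmap(rmap);
 *	free_irq(irq, dev);
 */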

/**
 * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
 * @notify: struct irq_affinity_notify passed by irq/manage.c
 * @mask: cpu mask for new SMP affinity
 *
 * This is executed in workqueue context.
 */
static void
irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
{
	struct irq_glue *glue =
		container_of(notify, struct irq_glue, notify);
	int rc;

	rc = cpu_rmap_update(glue->rmap, glue->index, mask);
	if (rc)
		pr_warn("irq_cpu_rmap_notify: update failed: %d\n", rc);
}

/**
 * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
 * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
 */
static void irq_cpu_rmap_release(struct kref *ref)
{
	struct irq_glue *glue =
		container_of(ref, struct irq_glue, notify.kref);

	cpu_rmap_put(glue->rmap);
	kfree(glue);
}

/**
 * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
 * @rmap: The reverse-map
 * @irq: The IRQ number
 *
 * This adds an IRQ affinity notifier that will update the reverse-map
 * automatically.
 *
 * Must be called in process context, after the IRQ is allocated but
 * before it is bound with request_irq().
 *
 * Return: %0 on success, -ENOMEM if the glue structure cannot be
 * allocated, or an error from irq_set_affinity_notifier().
 */
int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
{
	struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
	int rc;

	if (!glue)
		return -ENOMEM;

	glue->notify.notify = irq_cpu_rmap_notify;
	glue->notify.release = irq_cpu_rmap_release;
	glue->rmap = rmap;
	cpu_rmap_get(rmap);
	glue->index = cpu_rmap_add(rmap, glue);
	rc = irq_set_affinity_notifier(irq, &glue->notify);
	if (rc) {
		/* Clear the stale rmap entry so that free_irq_cpu_rmap()
		 * does not dereference the glue we are about to free.
		 */
		rmap->obj[glue->index] = NULL;
		cpu_rmap_put(glue->rmap);
		kfree(glue);
	}
	return rc;
}
EXPORT_SYMBOL(irq_cpu_rmap_add);
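
/*
 * End-to-end sketch (vector counts and the "irqs" array are
 * hypothetical). Each glue holds a reference on the map, so the map
 * is finally freed only once free_irq_cpu_rmap() has dropped the
 * initial reference and every notifier's release callback has run:
 *
 *	rmap = alloc_cpu_rmap(nr_vecs, GFP_KERNEL);
 *	if (!rmap)
 *		return -ENOMEM;
 *	for (i = 0; i < nr_vecs; i++) {
 *		rc = irq_cpu_rmap_add(rmap, irqs[i]);
 *		if (rc)
 *			goto fail;
 *	}
 */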