// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Imagination Technologies Ltd
 * Author: Qais Yousef <qais.yousef@imgtec.com>
 *
 * This file contains driver APIs to the IPI subsystem.
 */

#define pr_fmt(fmt) "genirq/ipi: " fmt

#include <linux/irqdomain.h>
#include <linux/irq.h>

/**
 * irq_reserve_ipi() - Set up an IPI to destination cpumask
 * @domain: IPI domain
 * @dest: cpumask of cpus which can receive the IPI
 *
 * Allocate a virq that can be used to send IPI to any CPU in dest mask.
 *
 * Returns the Linux IRQ number on success and a negative error code on
 * failure.
 */
int irq_reserve_ipi(struct irq_domain *domain,
		    const struct cpumask *dest)
{
	unsigned int nr_irqs, offset;
	struct irq_data *data;
	int virq, i;

	if (!domain || !irq_domain_is_ipi(domain)) {
		pr_warn("Reservation on a non IPI domain\n");
		return -EINVAL;
	}

	if (!cpumask_subset(dest, cpu_possible_mask)) {
		pr_warn("Reservation is not in possible_cpu_mask\n");
		return -EINVAL;
	}

	nr_irqs = cpumask_weight(dest);
	if (!nr_irqs) {
		pr_warn("Reservation for empty destination mask\n");
		return -EINVAL;
	}

	if (irq_domain_is_ipi_single(domain)) {
		/*
		 * If the underlying implementation uses a single HW irq on
		 * all cpus then we only need a single Linux irq number for
		 * it. We have no restrictions vs. the destination mask. The
		 * underlying implementation can deal with holes nicely.
		 */
		nr_irqs = 1;
		offset = 0;
	} else {
		unsigned int next;

		/*
		 * The IPI requires a separate HW irq on each CPU. We require
		 * that the destination mask is consecutive. If an
		 * implementation needs to support holes, it can reserve
		 * several IPI ranges.
		 */
		offset = cpumask_first(dest);
		/*
		 * Find a hole and if found look for another set bit after the
		 * hole. For now we don't support this scenario.
		 */
		next = cpumask_next_zero(offset, dest);
		if (next < nr_cpu_ids)
			next = cpumask_next(next, dest);
		if (next < nr_cpu_ids) {
			pr_warn("Destination mask has holes\n");
			return -EINVAL;
		}
	}

	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc descs\n");
		return -ENOMEM;
	}

	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
				       (void *) dest, true, NULL);

	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
		goto free_descs;
	}

	for (i = 0; i < nr_irqs; i++) {
		data = irq_get_irq_data(virq + i);
		cpumask_copy(data->common->affinity, dest);
		data->common->ipi_offset = offset;
		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
	}
	return virq;

free_descs:
	irq_free_descs(virq, nr_irqs);
	return -EBUSY;
}
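
/*
 * Usage sketch (illustrative, not an in-tree caller): "ipi_domain" is a
 * placeholder for an IPI-capable domain that the irqchip driver created
 * with IRQ_DOMAIN_FLAG_IPI_SINGLE or IRQ_DOMAIN_FLAG_IPI_PER_CPU set.
 *
 *	int virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);
 *
 *	if (virq < 0)
 *		pr_err("IPI reservation failed: %d\n", virq);
 */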

/**
 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
 * @irq: linux irq number to be destroyed
 * @dest: cpumask of cpus which should have the IPI removed
 *
 * The IPIs allocated with irq_reserve_ipi() are returned to the system
 * destroying all virqs associated with them.
 *
 * Returns zero on success or error code on failure.
 */
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
	struct irq_domain *domain;
	unsigned int nr_irqs;

	if (!irq || !data || !ipimask)
		return -EINVAL;

	domain = data->domain;
	if (WARN_ON(domain == NULL))
		return -EINVAL;

	if (!irq_domain_is_ipi(domain)) {
		pr_warn("Trying to destroy a non IPI domain!\n");
		return -EINVAL;
	}

	if (WARN_ON(!cpumask_subset(dest, ipimask)))
		/*
		 * Must be destroying a subset of CPUs to which this IPI
		 * was set up to target
		 */
		return -EINVAL;

	if (irq_domain_is_ipi_per_cpu(domain)) {
		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
		nr_irqs = cpumask_weight(dest);
	} else {
		nr_irqs = 1;
	}

	irq_domain_free_irqs(irq, nr_irqs);
	return 0;
}
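
/*
 * Teardown sketch pairing with the reservation example above (illustrative;
 * "virq" and "dest" are the values used at irq_reserve_ipi() time):
 *
 *	int ret = irq_destroy_ipi(virq, dest);
 *
 *	if (ret)
 *		pr_err("IPI teardown failed: %d\n", ret);
 */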

/**
 * ipi_get_hwirq - Get the hwirq associated with an IPI to a cpu
 * @irq: linux irq number
 * @cpu: the target cpu
 *
 * When dealing with coprocessor IPIs, we need to inform the coprocessor of
 * the hwirq it needs to use to receive and send IPIs.
 *
 * Returns hwirq value on success and INVALID_HWIRQ on failure.
 */
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;

	if (!data || !ipimask || cpu >= nr_cpu_ids)
		return INVALID_HWIRQ;

	if (!cpumask_test_cpu(cpu, ipimask))
		return INVALID_HWIRQ;

	/*
	 * Get the real hardware irq number if the underlying implementation
	 * uses a separate irq per cpu. If the underlying implementation uses
	 * a single hardware irq for all cpus then the IPI send mechanism
	 * needs to take care of the cpu destinations.
	 */
	if (irq_domain_is_ipi_per_cpu(data->domain))
		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);

	return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
}
EXPORT_SYMBOL_GPL(ipi_get_hwirq);
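
/*
 * Illustrative sketch of the coprocessor case described above: a
 * hypothetical firmware-config structure ("fw_cfg" and its field are
 * placeholders) is told which hwirq the coprocessor should raise.
 *
 *	irq_hw_number_t hwirq = ipi_get_hwirq(virq, cpu);
 *
 *	if (hwirq == INVALID_HWIRQ)
 *		return -EINVAL;
 *	fw_cfg->ipi_hwirq = hwirq;
 */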

static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
			   const struct cpumask *dest, unsigned int cpu)
{
	struct cpumask *ipimask = irq_data_get_affinity_mask(data);

	if (!chip || !ipimask)
		return -EINVAL;

	if (!chip->ipi_send_single && !chip->ipi_send_mask)
		return -EINVAL;

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (dest) {
		if (!cpumask_subset(dest, ipimask))
			return -EINVAL;
	} else {
		if (!cpumask_test_cpu(cpu, ipimask))
			return -EINVAL;
	}
	return 0;
}

/**
 * __ipi_send_single - send an IPI to a target Linux SMP CPU
 * @desc: pointer to irq_desc of the IRQ
 * @cpu: destination CPU, must be in the destination mask passed to
 *	 irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Returns zero on success and negative error number on failure.
 */
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;
#endif
	if (!chip->ipi_send_single) {
		chip->ipi_send_mask(data, cpumask_of(cpu));
		return 0;
	}

	/* FIXME: Store this information in irqdata flags */
	if (irq_domain_is_ipi_per_cpu(data->domain) &&
	    cpu != data->common->ipi_offset) {
		/* use the correct data for that cpu */
		unsigned int irq = data->irq + cpu - data->common->ipi_offset;

		data = irq_get_irq_data(irq);
	}
	chip->ipi_send_single(data, cpu);
	return 0;
}
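
/*
 * Illustrative arch-code sketch, assuming the architecture looked up and
 * cached the reserved descriptor at boot ("smp_ipi_desc" is hypothetical,
 * e.g. obtained via irq_to_desc() on the reserved virq):
 *
 *	void arch_send_call_function_single_ipi(int cpu)
 *	{
 *		__ipi_send_single(smp_ipi_desc, cpu);
 *	}
 */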

/**
 * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
 * @desc: pointer to irq_desc of the IRQ
 * @dest: dest CPU(s), must be a subset of the mask passed to
 *	  irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Returns zero on success and negative error number on failure.
 */
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned int cpu;

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;
#endif
	if (chip->ipi_send_mask) {
		chip->ipi_send_mask(data, dest);
		return 0;
	}

	if (irq_domain_is_ipi_per_cpu(data->domain)) {
		unsigned int base = data->irq;

		for_each_cpu(cpu, dest) {
			unsigned int irq = base + cpu - data->common->ipi_offset;

			data = irq_get_irq_data(irq);
			chip->ipi_send_single(data, cpu);
		}
	} else {
		for_each_cpu(cpu, dest)
			chip->ipi_send_single(data, cpu);
	}
	return 0;
}
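
/*
 * Illustrative counterpart of the sketch after __ipi_send_single(), using
 * the same hypothetical cached "smp_ipi_desc":
 *
 *	void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 *	{
 *		__ipi_send_mask(smp_ipi_desc, mask);
 *	}
 */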

/**
 * ipi_send_single - Send an IPI to a single CPU
 * @virq: linux irq number from irq_reserve_ipi()
 * @cpu: destination CPU, must be in the destination mask passed to
 *	 irq_reserve_ipi()
 *
 * Returns zero on success and negative error number on failure.
 */
int ipi_send_single(unsigned int virq, unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;

	return __ipi_send_single(desc, cpu);
}
EXPORT_SYMBOL_GPL(ipi_send_single);
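
/*
 * Driver-side sketch (illustrative): kick one CPU using the virq returned
 * by irq_reserve_ipi(); "target_cpu" is a placeholder.
 *
 *	int ret = ipi_send_single(virq, target_cpu);
 *
 *	if (ret)
 *		pr_warn("IPI to CPU%u failed: %d\n", target_cpu, ret);
 */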

/**
 * ipi_send_mask - Send an IPI to target CPU(s)
 * @virq: linux irq number from irq_reserve_ipi()
 * @dest: dest CPU(s), must be a subset of the mask passed to
 *	  irq_reserve_ipi()
 *
 * Returns zero on success and negative error number on failure.
 */
int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;

	return __ipi_send_mask(desc, dest);
}
EXPORT_SYMBOL_GPL(ipi_send_mask);
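
/*
 * Driver-side sketch for the mask variant (illustrative; "virq" and "dest"
 * as reserved above):
 *
 *	int ret = ipi_send_mask(virq, dest);
 *
 *	if (ret)
 *		pr_warn("IPI to mask failed: %d\n", ret);
 */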