^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright 2011 IBM Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/xics.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/hvcall.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) unsigned int ret = XICS_IRQ_SPURIOUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) rc = plpar_hcall(H_XIRR, retbuf, cppr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) if (rc == H_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) ret = (unsigned int)retbuf[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) __func__, cppr, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) static inline void icp_hv_set_cppr(u8 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) long rc = plpar_hcall_norets(H_CPPR, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) if (rc != H_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) __func__, value, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) static inline void icp_hv_set_xirr(unsigned int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) long rc = plpar_hcall_norets(H_EOI, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) if (rc != H_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) __func__, value, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) icp_hv_set_cppr(value >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) static inline void icp_hv_set_qirr(int n_cpu , u8 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) int hw_cpu = get_hard_smp_processor_id(n_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) /* Make sure all previous accesses are ordered before IPI sending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) if (rc != H_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
/* icp_ops ->eoi hook: pop the saved CPPR and EOI the interrupt source. */
static void icp_hv_eoi(struct irq_data *d)
{
	unsigned int vec = (unsigned int)irqd_to_hwirq(d);
	unsigned int xirr;

	iosync();

	/* Rebuild the XIRR: restored priority in the top byte, source below */
	xirr = (xics_pop_cppr() << 24) | vec;
	icp_hv_set_xirr(xirr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
/* icp_ops ->teardown_cpu hook: quiesce the local presentation controller. */
static void icp_hv_teardown_cpu(void)
{
	/* Clear any pending IPI by writing 0xff (lowest priority) to our MFRR */
	icp_hv_set_qirr(smp_processor_id(), 0xff);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) static void icp_hv_flush_ipi(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) /* We take the ipi irq but and never return so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * need to EOI the IPI, but want to leave our priority 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * should we check all the other interrupts too?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * should we be flagging idle loop instead?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * or creating some task to be scheduled?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) static unsigned int icp_hv_get_irq(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) unsigned int vec = xirr & 0x00ffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) unsigned int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) if (vec == XICS_IRQ_SPURIOUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) irq = irq_find_mapping(xics_host, vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) if (likely(irq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) xics_push_cppr(vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) /* We don't have a linux mapping, so have rtas mask it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) xics_mask_unknown_vec(vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) /* We might learn about it later, so EOI it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) icp_hv_set_xirr(xirr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
/*
 * icp_ops ->set_priority hook: set this cpu's interrupt priority.
 * Update the cached base CPPR first, then program the hardware via
 * the hypervisor, and finally order the update with iosync().
 */
static void icp_hv_set_cpu_priority(unsigned char cppr)
{
	xics_set_base_cppr(cppr);
	icp_hv_set_cppr(cppr);
	iosync();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
/* icp_ops ->cause_ipi hook: raise an IPI on @cpu by writing its MFRR. */
static void icp_hv_cause_ipi(int cpu)
{
	icp_hv_set_qirr(cpu, IPI_PRIORITY);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) icp_hv_set_qirr(cpu, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) return smp_ipi_demux();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) static const struct icp_ops icp_hv_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) .get_irq = icp_hv_get_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) .eoi = icp_hv_eoi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) .set_priority = icp_hv_set_cpu_priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) .teardown_cpu = icp_hv_teardown_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) .flush_ipi = icp_hv_flush_ipi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) .ipi_action = icp_hv_ipi_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) .cause_ipi = icp_hv_cause_ipi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) int icp_hv_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) np = of_find_node_by_type(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) "PowerPC-External-Interrupt-Presentation");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) icp_ops = &icp_hv_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)