// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

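/*
 * All ARConnect commands are issued through one indirect interface: the
 * command and its parameter are written to ARC_REG_MCIP_CMD (data, if
 * any, goes to ARC_REG_MCIP_WDATA first) and any result is read back
 * from ARC_REG_MCIP_READBACK; see the __mcip_cmd*() helpers in
 * soc/arc/mcip.h. A command/readback sequence from one CPU must not be
 * interleaved with another's, hence this single global lock around
 * every sequence below.
 */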
static DEFINE_RAW_SPINLOCK(mcip_lock);

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set mask to halt GFRC if any online core in the SMP cluster is halted.
 * Only works for ARC HS v3.0+; on earlier versions it has no effect.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
	struct bcr_generic gfrc;
	unsigned long flags;
	u32 gfrc_halt_mask;

	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

	/*
	 * The CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added
	 * in GFRC version 0x3.
	 */
	if (gfrc.ver < 0x3)
		return;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
	gfrc_halt_mask |= BIT(cpu);
	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_update_debug_halt_mask(int cpu)
{
	u32 mcip_mask = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * mcip_mask is the same for the CMD_DEBUG_SET_SELECT and
	 * CMD_DEBUG_SET_MASK commands, so read it once instead of reading
	 * both CMD_DEBUG_READ_MASK and CMD_DEBUG_READ_SELECT.
	 */
	__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
	mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);

	mcip_mask |= BIT(cpu);

	__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
	/*
	 * The parameter specifies the halt causes to react to:
	 * STATUS32[H]/actionpoint/breakpoint/self-halt.
	 * We choose all of them (0xF).
	 */
	__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_setup_per_cpu(int cpu)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

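	/*
	 * IPI_IRQ carries cross-core IPIs; SOFTIRQ_IRQ carries self-IPIs,
	 * since ARConnect cannot target the sending core (see mcip_ipi_send()).
	 */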
	smp_ipi_irq_setup(cpu, IPI_IRQ);
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

	/* Update GFRC halt mask as new CPU came online */
	if (mp.gfrc)
		mcip_update_gfrc_halt_mask(cpu);

	/* Update MCIP debug mask as new CPU came online */
	if (mp.dbg)
		mcip_update_debug_halt_mask(cpu);
}

static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send IPI to others */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If the receiver already has a pending interrupt, elide sending this
	 * one. Linux cross-core calling copes fine with concurrent IPIs
	 * coalesced into one; see arch/arc/kernel/smp.c: ipi_send_msg_one().
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In the rare case that multiple concurrent IPIs sent to the same
	 * target are coalesced by MCIP into one asserted IRQ, @cpu can have
	 * multiple bits set as opposed to the typical single bit.
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_probe_n_setup(void)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

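	/*
	 * Illustrative resulting line (assuming all blocks are present):
	 *   Extn [SMP]	: ARConnect (v2): 4 cores with IPI IDU DEBUG GFRC
	 */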
	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.gfrc, "GFRC"));

	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}

struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  - dynamic routing (IRQ affinity)
 *  - load balancing (Round Robin interrupt distribution)
 *  - 1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
			 bool set_distr, unsigned int distr)
{
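	/*
	 * Layout mirrors the low bits of the IDU MODE register on this
	 * little-endian target: distribution mode in bits [1:0], trigger
	 * (level vs. edge) in bit 4.
	 */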
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
	if (set_distr)
		data.distr = distr;
	if (set_lvl)
		data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
	idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_ack(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask_ack(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;
	unsigned int destination_bits;
	unsigned int distribution_mode;

	/* Error out if there is no online CPU in @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	destination_bits = cpumask_bits(&online)[0];
	idu_set_dest(data->hwirq, destination_bits);

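	/* Direct delivery for a single destination core, round-robin otherwise */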
	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	idu_set_mode(data->hwirq, false, 0, true, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int idu_irq_set_type(struct irq_data *data, u32 type)
{
	unsigned long flags;

	/*
	 * ARCv2 IDU HW does not support inverse polarity, so these are the
	 * only interrupt types supported.
	 */
	if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	idu_set_mode(data->hwirq, true,
		     type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
						   IDU_M_TRIG_LEVEL,
		     false, 0);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return 0;
}

static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in IDU must be set manually since
	 * in some cases the kernel will not call irq_set_affinity() by itself:
	 * 1. When the kernel is not configured with SMP support.
	 * 2. When the kernel is configured with SMP support but the parent
	 *    interrupt controller does not support setting the affinity and
	 *    cannot propagate it to IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
	.irq_ack		= idu_irq_ack,
	.irq_mask_ack		= idu_irq_mask_ack,
	.irq_enable		= idu_irq_enable,
	.irq_set_type		= idu_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif
};

static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	struct irq_chip *core_chip = irq_desc_get_chip(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

	chained_irq_enter(core_chip, desc);
	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
	chained_irq_exit(core_chip, desc);
}

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
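	/* Affinity of this IRQ may be changed directly from process context */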
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= irq_domain_xlate_onetwocell,
	.map	= idu_irq_map,
};

/*
 * [16, 23]: Statically assigned, always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0, then "C" common IRQs
 * [24+C, N]: Not statically assigned, private-per-core
 */
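
/*
 * Example: with C = 2 common IRQs, core-intc lines 24 and 25 serve as the
 * IDU uplinks, and IDU hwirqs 0 and 1 map onto them
 * (core hwirq = IDU hwirq + FIRST_EXT_IRQ, see idu_cascade_isr() above).
 */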

static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * Map the parent uplink IRQs (towards core intc): 24, 25, ...
		 * This mapping has already been done, but we repeat it here to
		 * get the parent virq so we can install the IDU cascade
		 * handler as the first-level ISR.
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		BUG_ON(!virq);
		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
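
/*
 * An illustrative (not authoritative) matching DeviceTree node:
 *
 *	idu_intc: idu-interrupt-controller {
 *		compatible = "snps,archs-idu-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 *
 * Per idu_irq_ops above (irq_domain_xlate_onetwocell), an interrupt
 * specifier is the common IRQ number (IDU hwirq), optionally followed
 * by a second cell with the trigger type.
 */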