// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <asm/irq.h>

#define NR_EXCEPTIONS	16

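/*
 * Layout of the IRQ build configuration register, read below via
 * READ_BCR(ARC_REG_IRQ_BCR). Field meanings as used in this file (not
 * quoted from the PRM): ver = BCR version, irqs = number of core
 * interrupt lines, exts = number of external interrupts, prio = number
 * of priority levels (encoded as N-1), firq = Fast IRQ support.
 */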
struct bcr_irq_arcv2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
#else
	unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
#endif
};

/*
 * Early hardware-specific interrupt setup
 * -Called very early (start_kernel -> setup_arch -> setup_processor)
 * -Platform independent (a must for any ARC core)
 * -Needed for each CPU (hence not foldable into init_IRQ)
 */
void arc_init_IRQ(void)
{
	unsigned int tmp, irq_prio, i;
	struct bcr_irq_arcv2 irq_bcr;

	struct aux_irq_ctrl {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int res3:18, save_idx_regs:1, res2:1,
			     save_u_to_u:1, save_lp_regs:1, save_blink:1,
			     res:4, save_nr_gpr_pairs:5;
#else
		unsigned int save_nr_gpr_pairs:5, res:4,
			     save_blink:1, save_lp_regs:1, save_u_to_u:1,
			     res2:1, save_idx_regs:1, res3:18;
#endif
	} ictrl;

	*(unsigned int *)&ictrl = 0;

#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
	ictrl.save_nr_gpr_pairs = 6;	/* r0 to r11 (r12 saved manually) */
	ictrl.save_blink = 1;
	ictrl.save_lp_regs = 1;		/* LP_COUNT, LP_START, LP_END */
	ictrl.save_u_to_u = 0;		/* user ctxt saved on kernel stack */
	ictrl.save_idx_regs = 1;	/* JLI, LDI, EI */
#endif

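	/*
	 * With autosave enabled (the #ifndef block above) and GCC's
	 * little-endian bitfield layout, the value written below works
	 * out to: save_nr_gpr_pairs = 6 -> bits [4:0], save_blink ->
	 * bit 9, save_lp_regs -> bit 10, save_idx_regs -> bit 13, i.e.
	 * AUX_IRQ_CTRL = 0x2606 (a sketch for reference, not from the PRM).
	 */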
	WRITE_AUX(AUX_IRQ_CTRL, ictrl);

	/*
	 * ARCv2 core intc provides multiple interrupt priorities (up to 16).
	 * Typical builds though have only two levels (0-high, 1-low).
	 * Linux by default uses the lower prio 1 for most irqs, reserving 0
	 * for NMI-style interrupts in the future (e.g. perf).
	 */

	READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);

	irq_prio = irq_bcr.prio;	/* Encoded as N-1 for N levels */
	pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
		irq_prio + 1, ARCV2_IRQ_DEF_PRIO,
		irq_bcr.firq ? " FIRQ (not used)":"");

	/*
	 * Set a default priority for all available interrupts to prevent
	 * switching of register banks if Fast IRQ and multiple register
	 * banks are supported by the CPU.
	 * Also disable the private-per-core IRQ lines so that faulty
	 * external HW can't trigger an interrupt the kernel is not ready
	 * to handle.
	 */
	for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
		write_aux_reg(AUX_IRQ_SELECT, i);
		write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);

		/*
		 * Only mask CPU-private IRQs here.
		 * "Common" interrupts are masked at the IDU; otherwise they
		 * would need to be unmasked on each CPU, requiring IPIs.
		 */
		if (i < FIRST_EXT_IRQ)
			write_aux_reg(AUX_IRQ_ENABLE, 0);
	}

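	/*
	 * Note: the '<< 1' below presumably places the default priority in
	 * the STATUS32.E[3:0] threshold field at bits [4:1] of STATUS32
	 * (inferred from the shift; see the ARC HS PRM for the exact layout).
	 */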
	/* Set up STATUS32; don't enable interrupts yet, the kernel isn't ready */
	tmp = read_aux_reg(ARC_REG_STATUS32);
	tmp |= ARCV2_IRQ_DEF_PRIO << 1;
	tmp &= ~STATUS_IE_MASK;
	asm volatile("kflag %0	\n"::"r"(tmp));
}

static void arcv2_irq_mask(struct irq_data *data)
{
	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
	write_aux_reg(AUX_IRQ_ENABLE, 0);
}

static void arcv2_irq_unmask(struct irq_data *data)
{
	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
	write_aux_reg(AUX_IRQ_ENABLE, 1);
}

void arcv2_irq_enable(struct irq_data *data)
{
	/* set default priority */
	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
	write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);

	/*
	 * hw auto-enables (Linux unmask) all interrupts by default,
	 * so IRQ_ENABLE should not be needed here.
	 * XXX: However OSCI LAN needs it
	 */
	write_aux_reg(AUX_IRQ_ENABLE, 1);
}

static struct irq_chip arcv2_irq_chip = {
	.name		= "ARCv2 core Intc",
	.irq_mask	= arcv2_irq_mask,
	.irq_unmask	= arcv2_irq_unmask,
	.irq_enable	= arcv2_irq_enable
};

static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
			 irq_hw_number_t hw)
{
	/*
	 * core intc IRQs [16, 23]:
	 * Statically assigned, always private-per-core (timers, WDT, IPI, PCT)
	 */
	if (hw < FIRST_EXT_IRQ) {
		/*
		 * A subsequent request_percpu_irq() fails if percpu_devid is
		 * not set. That in turn sets NOAUTOEN, meaning each core
		 * needs to call enable_percpu_irq()
		 */
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
	} else {
		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
	}

	return 0;
}
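
/*
 * Illustrative sketch of how a per-cpu hwirq mapped above is then claimed
 * by a client (the handler and cookie names are made up for the example,
 * not taken from a real driver); both calls are the generic kernel APIs:
 *
 *	request_percpu_irq(virq, my_handler, "my-dev", my_pcpu_cookie);
 *	...
 *	enable_percpu_irq(virq, IRQ_TYPE_NONE);	 called on each CPU, since
 *						 NOAUTOEN is set above
 */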

static const struct irq_domain_ops arcv2_irq_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = arcv2_irq_map,
};

static int __init
init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *root_domain;
	struct bcr_irq_arcv2 irq_bcr;
	unsigned int nr_cpu_irqs;

	READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
	nr_cpu_irqs = irq_bcr.irqs + NR_EXCEPTIONS;

	if (parent)
		panic("DeviceTree incore intc not a root irq controller\n");

	root_domain = irq_domain_add_linear(intc, nr_cpu_irqs, &arcv2_irq_ops, NULL);
	if (!root_domain)
		panic("root irq domain not avail\n");

	/*
	 * Needed for primary domain lookup to succeed
	 * This is a primary irqchip, and can never have a parent
	 */
	irq_set_default_host(root_domain);

#ifdef CONFIG_SMP
	irq_create_mapping(root_domain, IPI_IRQ);
#endif
	irq_create_mapping(root_domain, SOFTIRQ_IRQ);

	return 0;
}

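/*
 * A matching device tree node might look like the sketch below (a minimal
 * example, not copied from a real board; boards may additionally list the
 * core interrupts they use):
 *
 *	core_intc: interrupt-controller {
 *		compatible = "snps,archs-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 */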
IRQCHIP_DECLARE(arc_intc, "snps,archs-intc", init_onchip_IRQ);