^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/irqchip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #define NR_CPU_IRQS 32 /* number of irq lines coming in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #define TIMER0_IRQ 3 /* Fixed by ISA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * Early Hardware specific Interrupt setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * -Platform independent, needed for each CPU (not foldable into init_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * -Called very early (start_kernel -> setup_arch -> setup_processor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * what it does ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * -Optionally, setup the High priority Interrupts as Level 2 IRQs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) void arc_init_IRQ(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) unsigned int level_mask = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * Write to register, even if no LV2 IRQs configured to reset it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * in case bootloader had mucked with it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) write_aux_reg(AUX_IRQ_LEV, level_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) if (level_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) pr_info("Level-2 interrupts bitset %x\n", level_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * Disable all IRQ lines so faulty external hardware won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * trigger interrupt that kernel is not ready to handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) for (i = TIMER0_IRQ; i < NR_CPU_IRQS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) unsigned int ienb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) ienb = read_aux_reg(AUX_IENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) ienb &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) write_aux_reg(AUX_IENABLE, ienb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * ARC700 core includes a simple on-chip intc supporting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * -per IRQ enable/disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * -2 levels of interrupts (high/low)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * -all interrupts being level triggered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * To reduce platform code, we assume all IRQs directly hooked-up into intc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * Platforms with external intc, hence cascaded IRQs, are free to over-ride
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * below, per IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) static void arc_irq_mask(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) unsigned int ienb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) ienb = read_aux_reg(AUX_IENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) ienb &= ~(1 << data->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) write_aux_reg(AUX_IENABLE, ienb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static void arc_irq_unmask(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) unsigned int ienb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) ienb = read_aux_reg(AUX_IENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) ienb |= (1 << data->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) write_aux_reg(AUX_IENABLE, ienb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
/*
 * irq_chip for the ARC700 in-core intc. Only mask/unmask callbacks
 * are provided; per the comment above, all interrupts are level
 * triggered, so no ack/eoi handling is needed here.
 */
static struct irq_chip onchip_intc = {
	.name           = "ARC In-core Intc",
	.irq_mask	= arc_irq_mask,
	.irq_unmask	= arc_irq_unmask,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) switch (hw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) case TIMER0_IRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) irq_set_percpu_devid(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
/*
 * Domain ops: DT interrupt specifier is a single cell holding the raw
 * hw irq number (onecell translation).
 */
static const struct irq_domain_ops arc_intc_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = arc_intc_domain_map,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) struct irq_domain *root_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) panic("DeviceTree incore intc not a root irq controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) &arc_intc_domain_ops, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) if (!root_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) panic("root irq domain not avail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * Needed for primary domain lookup to succeed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * This is a primary irqchip, and can never have a parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) irq_set_default_host(root_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) * arch_local_irq_enable - Enable interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * 1. Explicitly called to re-enable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) * which maybe in hard ISR itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) * Semantics of this function change depending on where it is called from:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) * -If called from hard-ISR, it must not invert interrupt priorities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * e.g. suppose TIMER is high priority (Level 2) IRQ
 * Timer hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * Here local_irq_enable( ) shd not re-enable lower priority interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * -If called from soft-ISR, it must re-enable all interrupts
 * soft ISR are low priority jobs which can be very slow, thus all IRQs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * must be enabled while they run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * Now hardware context wise we may still be in L2 ISR (not done rtie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * still we must re-enable both L1 and L2 IRQs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * Another twist is prev scenario with flow being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
 * here we must not re-enable L1 as prev L1 Interrupt's h/w context will get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * over-written (this is deficiency in ARC700 Interrupt mechanism)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */

void arch_local_irq_enable(void)
{
	unsigned long flags = arch_local_save_flags();

	/*
	 * Per the big comment above: when still inside an active L2
	 * (high prio) ISR, re-enable only L2 — enabling L1 here could
	 * clobber an interrupted L1 handler's h/w context. Only when
	 * no L2 is active, and an L1 ISR is, re-enable L1.
	 * (A1/A2 = "active in level 1/2", E1/E2 = "enable level 1/2"
	 * — assumed from the STATUS_* mask names; confirm against the
	 * ARCompact ISA manual.)
	 */
	if (flags & STATUS_A2_MASK)
		flags |= STATUS_E2_MASK;
	else if (flags & STATUS_A1_MASK)
		flags |= STATUS_E1_MASK;

	arch_local_irq_restore(flags);
}

EXPORT_SYMBOL(arch_local_irq_enable);
#endif