^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * DEC I/O ASIC interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2002, 2003, 2013 Maciej W. Rozycki
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <asm/dec/ioasic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/dec/ioasic_addrs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/dec/ioasic_ints.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) static int ioasic_irq_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) static void unmask_ioasic_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) u32 simr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) simr = ioasic_read(IO_REG_SIMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) simr |= (1 << (d->irq - ioasic_irq_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) ioasic_write(IO_REG_SIMR, simr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) static void mask_ioasic_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) u32 simr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) simr = ioasic_read(IO_REG_SIMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) simr &= ~(1 << (d->irq - ioasic_irq_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) ioasic_write(IO_REG_SIMR, simr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
/*
 * Acknowledge an ordinary I/O ASIC interrupt: mask the line in SIMR,
 * then flush the write with fast_iob() so the mask is guaranteed to
 * have reached the device before the caller proceeds.
 */
static void ack_ioasic_irq(struct irq_data *d)
{
	mask_ioasic_irq(d);
	fast_iob();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
/*
 * irq_chip for the ordinary (non-DMA) I/O ASIC interrupt lines,
 * driven through SIMR mask/unmask only; used with handle_level_irq
 * (see init_ioasic_irqs() below).
 */
static struct irq_chip ioasic_irq_type = {
	.name = "IO-ASIC",
	.irq_ack = ack_ioasic_irq,
	.irq_mask = mask_ioasic_irq,
	.irq_mask_ack = ack_ioasic_irq,
	.irq_unmask = unmask_ioasic_irq,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) static void clear_ioasic_dma_irq(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) u32 sir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) sir = ~(1 << (d->irq - ioasic_irq_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) ioasic_write(IO_REG_SIR, sir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) fast_iob();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
/*
 * irq_chip for the I/O ASIC DMA interrupt lines.  Requests are cleared
 * in SIR either at ack time (edge-style informational interrupts) or at
 * EOI time (fasteoi-style error interrupts); see the comment below for
 * which handler each line gets.
 */
static struct irq_chip ioasic_dma_irq_type = {
	.name = "IO-ASIC-DMA",
	.irq_ack = clear_ioasic_dma_irq,
	.irq_mask = mask_ioasic_irq,
	.irq_unmask = unmask_ioasic_irq,
	.irq_eoi = clear_ioasic_dma_irq,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * I/O ASIC implements two kinds of DMA interrupts, informational and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * error interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * The formers do not stop DMA and should be cleared as soon as possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * so that if they retrigger before the handler has completed, usually as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * a side effect of actions taken by the handler, then they are reissued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * These use the `handle_edge_irq' handler that clears the request right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * The latters stop DMA and do not resume it until the interrupt has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * cleared. This cannot be done until after a corrective action has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * taken and this also means they will not retrigger. Therefore they use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * the `handle_fasteoi_irq' handler that only clears the request on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * way out. Because MIPS processor interrupt inputs, one of which the I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * ASIC is cascaded to, are level-triggered it is recommended that error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * DMA interrupt action handlers are registered with the IRQF_ONESHOT flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * set so that they are run with the interrupt line masked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * This mask has `1' bits in the positions of informational interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) */
/* Bit mask of the informational (non-error) DMA interrupt lines. */
#define IO_IRQ_DMA_INFO						\
	(IO_IRQ_MASK(IO_INR_SCC0A_RXDMA) |			\
	 IO_IRQ_MASK(IO_INR_SCC1A_RXDMA) |			\
	 IO_IRQ_MASK(IO_INR_ISDN_TXDMA) |			\
	 IO_IRQ_MASK(IO_INR_ISDN_RXDMA) |			\
	 IO_IRQ_MASK(IO_INR_ASC_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) void __init init_ioasic_irqs(int base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) /* Mask interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) ioasic_write(IO_REG_SIMR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) fast_iob();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) for (i = base; i < base + IO_INR_DMA; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) irq_set_chip_and_handler(i, &ioasic_irq_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) handle_level_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) for (; i < base + IO_IRQ_LINES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) irq_set_chip_and_handler(i, &ioasic_dma_irq_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 1 << (i - base) & IO_IRQ_DMA_INFO ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) handle_edge_irq : handle_fasteoi_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) ioasic_irq_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }