// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  GT641xx IRQ routines.
 *
 *  Copyright (C) 2007  Yoichi Yuasa <yuasa@linux-mips.org>
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <asm/gt64120.h>

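/*
 * Map a Linux IRQ number onto its bit position in the GT-641xx
 * interrupt cause/mask registers.
 */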
#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))

static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);

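/* Acknowledge an interrupt by clearing its bit in the cause register. */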
static void ack_gt641xx_irq(struct irq_data *d)
{
	unsigned long flags;
	u32 cause;

	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
	cause = GT_READ(GT_INTRCAUSE_OFS);
	cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
	GT_WRITE(GT_INTRCAUSE_OFS, cause);
	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}

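/* Disable an interrupt by clearing its bit in the mask register. */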
static void mask_gt641xx_irq(struct irq_data *d)
{
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
	mask = GT_READ(GT_INTRMASK_OFS);
	mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
	GT_WRITE(GT_INTRMASK_OFS, mask);
	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}

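/* Mask the interrupt and then acknowledge it under a single lock hold. */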
static void mask_ack_gt641xx_irq(struct irq_data *d)
{
	unsigned long flags;
	u32 cause, mask;

	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
	mask = GT_READ(GT_INTRMASK_OFS);
	mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
	GT_WRITE(GT_INTRMASK_OFS, mask);

	cause = GT_READ(GT_INTRCAUSE_OFS);
	cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
	GT_WRITE(GT_INTRCAUSE_OFS, cause);
	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}

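/* Re-enable an interrupt by setting its bit in the mask register. */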
static void unmask_gt641xx_irq(struct irq_data *d)
{
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
	mask = GT_READ(GT_INTRMASK_OFS);
	mask |= GT641XX_IRQ_TO_BIT(d->irq);
	GT_WRITE(GT_INTRMASK_OFS, mask);
	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}

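/* irq_chip callbacks for the GT-641xx interrupt controller. */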
static struct irq_chip gt641xx_irq_chip = {
	.name		= "GT641xx",
	.irq_ack	= ack_gt641xx_irq,
	.irq_mask	= mask_gt641xx_irq,
	.irq_mask_ack	= mask_ack_gt641xx_irq,
	.irq_unmask	= unmask_gt641xx_irq,
};

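/*
 * Dispatch the first pending, unmasked interrupt (lowest bit first).
 * Bits 0, 30 and 31 are summary bits (see the comment below), so only
 * bits 1-29 are dispatched; anything else is counted as spurious.
 */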
void gt641xx_irq_dispatch(void)
{
	u32 cause, mask;
	int i;

	cause = GT_READ(GT_INTRCAUSE_OFS);
	mask = GT_READ(GT_INTRMASK_OFS);
	cause &= mask;

	/*
	 * bit0 : logical or of all the interrupt bits.
	 * bit30: logical or of bits[29:26,20:1].
	 * bit31: logical or of bits[25:1].
	 */
	for (i = 1; i < 30; i++) {
		if (cause & (1U << i)) {
			do_IRQ(GT641XX_IRQ_BASE + i);
			return;
		}
	}

	atomic_inc(&irq_err_count);
}

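/*
 * Mask and clear all GT-641xx interrupts, then register the level-triggered
 * handler for the per-source bits (1-29).
 */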
void __init gt641xx_irq_init(void)
{
	int i;

	GT_WRITE(GT_INTRMASK_OFS, 0);
	GT_WRITE(GT_INTRCAUSE_OFS, 0);

	/*
	 * bit0 : logical or of all the interrupt bits.
	 * bit30: logical or of bits[29:26,20:1].
	 * bit31: logical or of bits[25:1].
	 */
	for (i = 1; i < 30; i++)
		irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
					 &gt641xx_irq_chip, handle_level_irq);
}