/*
 * RM200 specific code
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * i8259 parts ripped out of arch/mips/kernel/i8259.c
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/io.h>

#include <asm/sni.h>
#include <asm/time.h>
#include <asm/irq_cpu.h>

#define RM200_I8259A_IRQ_BASE 32

#define MEMPORT(_base, _irq)				\
	{						\
		.mapbase	= _base,		\
		.irq		= _irq,			\
		.uartclk	= 1843200,		\
		.iotype		= UPIO_MEM,		\
		.flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP, \
	}

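/*
 * For illustration only: MEMPORT(0x160003f8, RM200_I8259A_IRQ_BASE + 4)
 * expands to a plat_serial8250_port entry with
 *
 *	.mapbase = 0x160003f8, .irq = 36, .uartclk = 1843200,
 *	.iotype = UPIO_MEM, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
 *
 * i.e. a memory-mapped 16550-style port clocked at the usual 1.8432 MHz,
 * whose register window the 8250 core ioremap()s itself.
 */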
static struct plat_serial8250_port rm200_data[] = {
	MEMPORT(0x160003f8, RM200_I8259A_IRQ_BASE + 4),
	MEMPORT(0x160002f8, RM200_I8259A_IRQ_BASE + 3),
	{ },
};

static struct platform_device rm200_serial8250_device = {
	.name		= "serial8250",
	.id		= PLAT8250_DEV_PLATFORM,
	.dev		= {
		.platform_data	= rm200_data,
	},
};

static struct resource rm200_ds1216_rsrc[] = {
	{
		.start = 0x1cd41ffc,
		.end   = 0x1cd41fff,
		.flags = IORESOURCE_MEM
	}
};

static struct platform_device rm200_ds1216_device = {
	.name		= "rtc-ds1216",
	.num_resources	= ARRAY_SIZE(rm200_ds1216_rsrc),
	.resource	= rm200_ds1216_rsrc
};

static struct resource snirm_82596_rm200_rsrc[] = {
	{
		.start = 0x18000000,
		.end   = 0x180fffff,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 0x1b000000,
		.end   = 0x1b000004,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 0x1ff00000,
		.end   = 0x1ff00020,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 27,
		.end   = 27,
		.flags = IORESOURCE_IRQ
	},
	{
		.flags = 0x00
	}
};

static struct platform_device snirm_82596_rm200_pdev = {
	.name		= "snirm_82596",
	.num_resources	= ARRAY_SIZE(snirm_82596_rm200_rsrc),
	.resource	= snirm_82596_rm200_rsrc
};

static struct resource snirm_53c710_rm200_rsrc[] = {
	{
		.start = 0x19000000,
		.end   = 0x190fffff,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 26,
		.end   = 26,
		.flags = IORESOURCE_IRQ
	}
};

static struct platform_device snirm_53c710_rm200_pdev = {
	.name		= "snirm_53c710",
	.num_resources	= ARRAY_SIZE(snirm_53c710_rm200_rsrc),
	.resource	= snirm_53c710_rm200_rsrc
};

static int __init snirm_setup_devinit(void)
{
	if (sni_brd_type == SNI_BRD_RM200) {
		platform_device_register(&rm200_serial8250_device);
		platform_device_register(&rm200_ds1216_device);
		platform_device_register(&snirm_82596_rm200_pdev);
		platform_device_register(&snirm_53c710_rm200_pdev);
		sni_eisa_root_init();
	}
	return 0;
}

device_initcall(snirm_setup_devinit);

/*
 * RM200 has an ISA and an EISA bus. The ISA bus is only used
 * for onboard devices and also has two i8259 PICs. Since these
 * PICs are not accessible via inb/outb, the following code uses
 * readb/writeb to access them.
 */
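/*
 * Rough sketch of the access path (addresses taken from the PIC
 * resources and ioremap() calls further down): the master PIC registers
 * live at physical 0x16000020 and the slave's at 0x160000a0. Both
 * windows are ioremap()ed in sni_rm200_i8259_irqs() and are only ever
 * touched through readb()/writeb() on the returned pointers, e.g.
 *
 *	writeb(0xff, rm200_pic_master + PIC_IMR);	(mask all inputs)
 */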

static DEFINE_RAW_SPINLOCK(sni_rm200_i8259A_lock);
#define PIC_CMD    0x00
#define PIC_IMR    0x01
#define PIC_ISR    PIC_CMD
#define PIC_POLL   PIC_ISR
#define PIC_OCW3   PIC_ISR

/* i8259A PIC related values */
#define PIC_CASCADE_IR		2
#define MASTER_ICW4_DEFAULT	0x01
#define SLAVE_ICW4_DEFAULT	0x01

/*
 * This contains the IRQ mask for both 8259A interrupt controllers.
 */
static unsigned int rm200_cached_irq_mask = 0xffff;
static __iomem u8 *rm200_pic_master;
static __iomem u8 *rm200_pic_slave;

#define cached_master_mask	(rm200_cached_irq_mask)
#define cached_slave_mask	(rm200_cached_irq_mask >> 8)
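/*
 * Layout note (follows from the shift above and the "irq & 8" tests
 * below): bits 0-7 of rm200_cached_irq_mask shadow the master PIC's IMR,
 * bits 8-15 shadow the slave's.
 */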

static void sni_rm200_disable_8259A_irq(struct irq_data *d)
{
	unsigned int mask, irq = d->irq - RM200_I8259A_IRQ_BASE;
	unsigned long flags;

	mask = 1 << irq;
	raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
	rm200_cached_irq_mask |= mask;
	if (irq & 8)
		writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
	else
		writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
	raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
}

static void sni_rm200_enable_8259A_irq(struct irq_data *d)
{
	unsigned int mask, irq = d->irq - RM200_I8259A_IRQ_BASE;
	unsigned long flags;

	mask = ~(1 << irq);
	raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
	rm200_cached_irq_mask &= mask;
	if (irq & 8)
		writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
	else
		writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
	raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
}

static inline int sni_rm200_i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1 << irq;

	if (irq < 8) {
		writeb(0x0B, rm200_pic_master + PIC_CMD);	/* ISR register */
		value = readb(rm200_pic_master + PIC_CMD) & irqmask;
		writeb(0x0A, rm200_pic_master + PIC_CMD);	/* back to the IRR register */
		return value;
	}
	writeb(0x0B, rm200_pic_slave + PIC_CMD);	/* ISR register */
	value = readb(rm200_pic_slave + PIC_CMD) & (irqmask >> 8);
	writeb(0x0A, rm200_pic_slave + PIC_CMD);	/* back to the IRR register */
	return value;
}

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!)
 */
void sni_rm200_mask_and_ack_8259A(struct irq_data *d)
{
	unsigned int irqmask, irq = d->irq - RM200_I8259A_IRQ_BASE;
	unsigned long flags;

	irqmask = 1 << irq;
	raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (rm200_cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	rm200_cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		readb(rm200_pic_slave + PIC_IMR);	/* dummy read */
		writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
		/* 'Specific EOI' to slave */
		writeb(0x60 + (irq & 7), rm200_pic_slave + PIC_CMD);
		/* 'Specific EOI' to master-IRQ2 */
		writeb(0x60 + PIC_CASCADE_IR, rm200_pic_master + PIC_CMD);
	} else {
		readb(rm200_pic_master + PIC_IMR);	/* dummy read */
		writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
		/* 'Specific EOI' to master */
		writeb(0x60 + irq, rm200_pic_master + PIC_CMD);
	}
	raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (sni_rm200_i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * let's ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG
			       "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}

static struct irq_chip sni_rm200_i8259A_chip = {
	.name		= "RM200-XT-PIC",
	.irq_mask	= sni_rm200_disable_8259A_irq,
	.irq_unmask	= sni_rm200_enable_8259A_irq,
	.irq_mask_ack	= sni_rm200_mask_and_ack_8259A,
};

/*
 * Do the traditional i8259 interrupt polling thing. This is for the few
 * cases where no better interrupt acknowledge method is available and we
 * absolutely must touch the i8259.
 */
static inline int sni_rm200_i8259_irq(void)
{
	int irq;

	raw_spin_lock(&sni_rm200_i8259A_lock);

	/* Perform an interrupt acknowledge cycle on controller 1. */
	writeb(0x0C, rm200_pic_master + PIC_CMD);	/* prepare for poll */
	irq = readb(rm200_pic_master + PIC_CMD) & 7;
	if (irq == PIC_CASCADE_IR) {
		/*
		 * Interrupt is cascaded so perform interrupt
		 * acknowledge on controller 2.
		 */
		writeb(0x0C, rm200_pic_slave + PIC_CMD); /* prepare for poll */
		irq = (readb(rm200_pic_slave + PIC_CMD) & 7) + 8;
	}

	if (unlikely(irq == 7)) {
		/*
		 * This may be a spurious interrupt.
		 *
		 * Read the interrupt status register (ISR). If the most
		 * significant bit is not set then there is no valid
		 * interrupt.
		 */
		writeb(0x0B, rm200_pic_master + PIC_ISR); /* ISR register */
		if (~readb(rm200_pic_master + PIC_ISR) & 0x80)
			irq = -1;
	}

	raw_spin_unlock(&sni_rm200_i8259A_lock);

	return likely(irq >= 0) ? irq + RM200_I8259A_IRQ_BASE : irq;
}

void sni_rm200_init_8259A(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);

	writeb(0xff, rm200_pic_master + PIC_IMR);
	writeb(0xff, rm200_pic_slave + PIC_IMR);

	writeb(0x11, rm200_pic_master + PIC_CMD);	/* ICW1: select 8259A-1 init */
	writeb(0, rm200_pic_master + PIC_IMR);		/* ICW2: IR0-7 mapped to vectors 0x00-0x07 */
	writeb(1U << PIC_CASCADE_IR, rm200_pic_master + PIC_IMR); /* ICW3: master has a slave on IR2 */
	writeb(MASTER_ICW4_DEFAULT, rm200_pic_master + PIC_IMR);  /* ICW4: normal EOI */
	writeb(0x11, rm200_pic_slave + PIC_CMD);	/* ICW1: select 8259A-2 init */
	writeb(8, rm200_pic_slave + PIC_IMR);		/* ICW2: IR0-7 mapped to vectors 0x08-0x0f */
	writeb(PIC_CASCADE_IR, rm200_pic_slave + PIC_IMR);	/* ICW3: slave on master's IR2 */
	writeb(SLAVE_ICW4_DEFAULT, rm200_pic_slave + PIC_IMR);	/* ICW4: normal EOI */
	udelay(100);		/* wait for 8259A to initialize */

	writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
	writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);

	raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
}

/*
 * IRQ2 is the cascade interrupt to the second interrupt controller.
 */

static struct resource sni_rm200_pic1_resource = {
	.name	= "onboard ISA pic1",
	.start	= 0x16000020,
	.end	= 0x16000023,
	.flags	= IORESOURCE_BUSY
};

static struct resource sni_rm200_pic2_resource = {
	.name	= "onboard ISA pic2",
	.start	= 0x160000a0,
	.end	= 0x160000a3,
	.flags	= IORESOURCE_BUSY
};

/* ISA irq handler */
static irqreturn_t sni_rm200_i8259A_irq_handler(int dummy, void *p)
{
	int irq;

	irq = sni_rm200_i8259_irq();
	if (unlikely(irq < 0))
		return IRQ_NONE;

	do_IRQ(irq);
	return IRQ_HANDLED;
}

void __init sni_rm200_i8259_irqs(void)
{
	int i;

	rm200_pic_master = ioremap(0x16000020, 4);
	if (!rm200_pic_master)
		return;
	rm200_pic_slave = ioremap(0x160000a0, 4);
	if (!rm200_pic_slave) {
		iounmap(rm200_pic_master);
		return;
	}

	insert_resource(&iomem_resource, &sni_rm200_pic1_resource);
	insert_resource(&iomem_resource, &sni_rm200_pic2_resource);

	sni_rm200_init_8259A();

	for (i = RM200_I8259A_IRQ_BASE; i < RM200_I8259A_IRQ_BASE + 16; i++)
		irq_set_chip_and_handler(i, &sni_rm200_i8259A_chip,
					 handle_level_irq);

	if (request_irq(RM200_I8259A_IRQ_BASE + PIC_CASCADE_IR, no_action,
			IRQF_NO_THREAD, "cascade", NULL))
		pr_err("Failed to register cascade interrupt\n");
}


#define SNI_RM200_INT_STAT_REG	CKSEG1ADDR(0xbc000000)
#define SNI_RM200_INT_ENA_REG	CKSEG1ADDR(0xbc080000)

#define SNI_RM200_INT_START	24
#define SNI_RM200_INT_END	28
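/*
 * These five interrupt sources show up as Linux IRQs SNI_RM200_INT_START
 * (24) through SNI_RM200_INT_END (28); sni_rm200_irq_init() below claims
 * the first of them for the onboard i8259 pair
 * (sni_rm200_i8259A_irq_handler) and the second for sni_isa_irq_handler.
 */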

static void enable_rm200_irq(struct irq_data *d)
{
	unsigned int mask = 1 << (d->irq - SNI_RM200_INT_START);

	*(volatile u8 *)SNI_RM200_INT_ENA_REG &= ~mask;
}

void disable_rm200_irq(struct irq_data *d)
{
	unsigned int mask = 1 << (d->irq - SNI_RM200_INT_START);

	*(volatile u8 *)SNI_RM200_INT_ENA_REG |= mask;
}

static struct irq_chip rm200_irq_type = {
	.name		= "RM200",
	.irq_mask	= disable_rm200_irq,
	.irq_unmask	= enable_rm200_irq,
};

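/*
 * Note on the XORs below: the enable register uses inverted logic (a set
 * bit masks the source, see enable/disable_rm200_irq() above), so it is
 * flipped with 0x1f before use; the status register appears to invert
 * bits 2 and 4 as well, hence the additional XOR with 0x14.
 */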
static void sni_rm200_hwint(void)
{
	u32 pending = read_c0_cause() & read_c0_status();
	u8 mask;
	u8 stat;
	int irq;

	if (pending & C_IRQ5)
		do_IRQ(MIPS_CPU_IRQ_BASE + 7);
	else if (pending & C_IRQ0) {
		clear_c0_status(IE_IRQ0);
		mask = *(volatile u8 *)SNI_RM200_INT_ENA_REG ^ 0x1f;
		stat = *(volatile u8 *)SNI_RM200_INT_STAT_REG ^ 0x14;
		irq = ffs(stat & mask & 0x1f);

		if (likely(irq > 0))
			do_IRQ(irq + SNI_RM200_INT_START - 1);
		set_c0_status(IE_IRQ0);
	}
}

void __init sni_rm200_irq_init(void)
{
	int i;

	*(volatile u8 *)SNI_RM200_INT_ENA_REG = 0x1f;

	sni_rm200_i8259_irqs();
	mips_cpu_irq_init();
	/* Actually we've got more interrupts to handle ... */
	for (i = SNI_RM200_INT_START; i <= SNI_RM200_INT_END; i++)
		irq_set_chip_and_handler(i, &rm200_irq_type, handle_level_irq);
	sni_hwint = sni_rm200_hwint;
	change_c0_status(ST0_IM, IE_IRQ0);
	if (request_irq(SNI_RM200_INT_START + 0, sni_rm200_i8259A_irq_handler,
			0, "onboard ISA", NULL))
		pr_err("Failed to register onboard ISA interrupt\n");
	if (request_irq(SNI_RM200_INT_START + 1, sni_isa_irq_handler, 0, "ISA",
			NULL))
		pr_err("Failed to register ISA interrupt\n");
}

void __init sni_rm200_init(void)
{
}