// SPDX-License-Identifier: GPL-2.0
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/pgtable.h>

#include <linux/atomic.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes, plus some generic
 * x86-specific things, if generic specifics make any sense at all.
 */
static void init_8259A(int auto_eoi);

static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock);

/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers.
 */
unsigned int cached_irq_mask = 0xffff;
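
/*
 * cached_master_mask and cached_slave_mask (defined in <asm/i8259.h>) are
 * byte-sized views of the low and high halves of cached_irq_mask, so the
 * mask/unmask helpers below update the combined mask and then write only
 * the IMR of the PIC that owns the IRQ.
 */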

/*
 * Not all IRQs can be routed through the IO-APIC, e.g. on certain (older)
 * boards the timer interrupt is not really connected to any IO-APIC pin,
 * it's fed to the master 8259A's IR0 line only.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * This 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs;

static void mask_8259A_irq(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask |= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void disable_8259A_irq(struct irq_data *data)
{
	mask_8259A_irq(data->irq);
}

static void unmask_8259A_irq(unsigned int irq)
{
	unsigned int mask = ~(1 << irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask &= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void enable_8259A_irq(struct irq_data *data)
{
	unmask_8259A_irq(data->irq);
}

static int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	if (irq < 8)
		ret = inb(PIC_MASTER_CMD) & mask;
	else
		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	return ret;
}

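/*
 * Route an IRQ back through the PIC: drop it from the IO-APIC routing
 * mask, hand it to the 8259A irq_chip with level-type flow handling and
 * (re)assign its legacy vector at the local APIC.
 */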
static void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	io_apic_irqs &= ~(1 << irq);
	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
	enable_irq(irq);
	lapic_assign_legacy_vector(irq, true);
}

/*
 * This function is expected to be called rarely: switching between
 * 8259A register banks is slow.
 * The caller must hold the irq controller spinlock (i8259A_lock).
 */
static inline int i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1 << irq;

	if (irq < 8) {
		outb(0x0B, PIC_MASTER_CMD);	/* ISR register */
		value = inb(PIC_MASTER_CMD) & irqmask;
		outb(0x0A, PIC_MASTER_CMD);	/* back to the IRR register */
		return value;
	}
	outb(0x0B, PIC_SLAVE_CMD);	/* ISR register */
	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
	outb(0x0A, PIC_SLAVE_CMD);	/* back to the IRR register */
	return value;
}

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI), and the order of EOI
 * to the two 8259s is important!
 */
static void mask_and_ack_8259A(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned int irqmask = 1 << irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_slave_mask, PIC_SLAVE_IMR);
		/* 'Specific EOI' to slave */
		outb(0x60+(irq&7), PIC_SLAVE_CMD);
		/* 'Specific EOI' to master-IRQ2 */
		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD);
	} else {
		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_master_mask, PIC_MASTER_IMR);
		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI' to master */
	}
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * let's ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk_deferred(KERN_DEBUG
				"spurious 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}

struct irq_chip i8259A_chip = {
	.name		= "XT-PIC",
	.irq_mask	= disable_8259A_irq,
	.irq_disable	= disable_8259A_irq,
	.irq_unmask	= enable_8259A_irq,
	.irq_mask_ack	= mask_and_ack_8259A,
};
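
/*
 * These IRQs are driven by handle_level_irq(): the core masks and acks the
 * line via ->irq_mask_ack before running the handler and normally unmasks
 * it again via ->irq_unmask once the handler completes.
 */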

static char irq_trigger[2];
/*
 * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ:
 * a set bit marks the corresponding IRQ as level-triggered.
 */
static void restore_ELCR(char *trigger)
{
	outb(trigger[0], 0x4d0);
	outb(trigger[1], 0x4d1);
}

static void save_ELCR(char *trigger)
{
	/* IRQ 0,1,2,8,13 are marked as reserved */
	trigger[0] = inb(0x4d0) & 0xF8;
	trigger[1] = inb(0x4d1) & 0xDE;
}

static void i8259A_resume(void)
{
	init_8259A(i8259A_auto_eoi);
	restore_ELCR(irq_trigger);
}

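/*
 * Only the ELCR needs saving across suspend: the IRQ masks live in
 * cached_irq_mask and are written back when i8259A_resume() reinitializes
 * the PICs via init_8259A().
 */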
static int i8259A_suspend(void)
{
	save_ELCR(irq_trigger);
	return 0;
}

static void i8259A_shutdown(void)
{
	/* Put the i8259A into a quiescent state that
	 * the kernel initialization code can get it
	 * out of.
	 */
	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
}

static struct syscore_ops i8259_syscore_ops = {
	.suspend	= i8259A_suspend,
	.resume		= i8259A_resume,
	.shutdown	= i8259A_shutdown,
};

static void mask_8259A(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void unmask_8259A(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static int probe_8259A(void)
{
	unsigned long flags;
	unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
	unsigned char new_val;

	/*
	 * Check to see if we have a PIC.
	 * Mask all except the cascade and read
	 * back the value we just wrote. If we don't
	 * have a PIC, we will read 0xff as opposed to the
	 * value we wrote.
	 */
	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
	outb(probe_val, PIC_MASTER_IMR);
	new_val = inb(PIC_MASTER_IMR);
	if (new_val != probe_val) {
		printk(KERN_INFO "Using NULL legacy PIC\n");
		legacy_pic = &null_legacy_pic;
	}

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
	return nr_legacy_irqs();
}

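/*
 * (Re)program both PICs with the standard ICW1..ICW4 sequence: ICW1 on the
 * command port starts initialization, then ICW2 (vector base), ICW3
 * (cascade wiring) and ICW4 (mode) follow on the data port, which only
 * afterwards acts as the interrupt mask register again.
 */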
static void init_8259A(int auto_eoi)
{
	unsigned long flags;

	i8259A_auto_eoi = auto_eoi;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */

	/*
	 * outb_pic - this has to work on a wide range of PC hardware.
	 */
	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */

	/* ICW2: 8259A-1 IR0-7 mapped to ISA_IRQ_VECTOR(0) */
	outb_pic(ISA_IRQ_VECTOR(0), PIC_MASTER_IMR);

	/* 8259A-1 (the master) has a slave on IR2 */
	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);

	if (auto_eoi)	/* master does Auto EOI */
		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
	else		/* master expects normal EOI */
		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */

	/* ICW2: 8259A-2 IR0-7 mapped to ISA_IRQ_VECTOR(8) */
	outb_pic(ISA_IRQ_VECTOR(8), PIC_SLAVE_IMR);
	/* 8259A-2 is a slave on master's IR2 */
	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
	/* (slave's support for AEOI in flat mode is to be investigated) */
	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);

	if (auto_eoi)
		/*
		 * In AEOI mode we just have to mask the interrupt
		 * when acking.
		 */
		i8259A_chip.irq_mask_ack = disable_8259A_irq;
	else
		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;

	udelay(100);		/* wait for 8259A to initialize */

	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

/*
 * Make the i8259 a driver so that we can select the PIC implementation at
 * run time. The goal is to keep a single x86 kernel binary compatible with
 * both PC-compatible and non-PC-compatible platforms, such as x86 MID.
 */

static void legacy_pic_noop(void) { }
static void legacy_pic_uint_noop(unsigned int unused) { }
static void legacy_pic_int_noop(int unused) { }
static int legacy_pic_irq_pending_noop(unsigned int irq)
{
	return 0;
}
static int legacy_pic_probe(void)
{
	return 0;
}

struct legacy_pic null_legacy_pic = {
	.nr_legacy_irqs = 0,
	.chip = &dummy_irq_chip,
	.mask = legacy_pic_uint_noop,
	.unmask = legacy_pic_uint_noop,
	.mask_all = legacy_pic_noop,
	.restore_mask = legacy_pic_noop,
	.init = legacy_pic_int_noop,
	.probe = legacy_pic_probe,
	.irq_pending = legacy_pic_irq_pending_noop,
	.make_irq = legacy_pic_uint_noop,
};

struct legacy_pic default_legacy_pic = {
	.nr_legacy_irqs = NR_IRQS_LEGACY,
	.chip = &i8259A_chip,
	.mask = mask_8259A_irq,
	.unmask = unmask_8259A_irq,
	.mask_all = mask_8259A,
	.restore_mask = unmask_8259A,
	.init = init_8259A,
	.probe = probe_8259A,
	.irq_pending = i8259A_irq_pending,
	.make_irq = make_8259A_irq,
};

struct legacy_pic *legacy_pic = &default_legacy_pic;
EXPORT_SYMBOL(legacy_pic);
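
/*
 * The rest of the kernel goes through this pointer (for example
 * legacy_pic->mask_all() or the nr_legacy_irqs() helper), so platforms
 * where probe_8259A() finds no PIC transparently fall back to the no-op
 * null_legacy_pic above.
 */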

static int __init i8259A_init_ops(void)
{
	if (legacy_pic == &default_legacy_pic)
		register_syscore_ops(&i8259_syscore_ops);

	return 0;
}

device_initcall(i8259A_init_ops);