// SPDX-License-Identifier: GPL-2.0-only
/*
 * bcsr.c -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
 *
 * All Alchemy development boards (except, of course, the weird PB1000)
 * have a few registers in a CPLD with a standardised layout; they mostly
 * differ only in base address.
 * All registers are 16 bits wide with 32-bit spacing.
 */
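
/*
 * Usage sketch (illustrative only, not part of the original file): board
 * setup code is expected to hand the physical addresses of the two CPLD
 * register blocks to bcsr_init() early during boot and may then use the
 * accessors below with the enum bcsr_id values from
 * <asm/mach-db1x00/bcsr.h>.  The addresses shown are hypothetical
 * placeholders, not real board values:
 *
 *	bcsr_init(0x19800000, 0x19c00000);
 *	bcsr_write(BCSR_HEXLEDS, 0x2a);
 */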

#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/mach-db1x00/bcsr.h>

static struct bcsr_reg {
	void __iomem *raddr;
	spinlock_t lock;
} bcsr_regs[BCSR_CNT];

static void __iomem *bcsr_virt;	/* KSEG1 addr of BCSR base */
static int bcsr_csc_base;	/* linux-irq of first cascaded irq */

void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
{
	int i;

	bcsr1_phys = KSEG1ADDR(CPHYSADDR(bcsr1_phys));
	bcsr2_phys = KSEG1ADDR(CPHYSADDR(bcsr2_phys));

	bcsr_virt = (void __iomem *)bcsr1_phys;

	for (i = 0; i < BCSR_CNT; i++) {
		/*
		 * registers from BCSR_HEXLEDS onward live in the second
		 * CPLD block; the 16-bit registers are spaced 4 bytes apart.
		 */
		if (i >= BCSR_HEXLEDS)
			bcsr_regs[i].raddr = (void __iomem *)bcsr2_phys +
					(0x04 * (i - BCSR_HEXLEDS));
		else
			bcsr_regs[i].raddr = (void __iomem *)bcsr1_phys +
					(0x04 * i);

		spin_lock_init(&bcsr_regs[i].lock);
	}
}

unsigned short bcsr_read(enum bcsr_id reg)
{
	unsigned short r;
	unsigned long flags;

	spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
	r = __raw_readw(bcsr_regs[reg].raddr);
	spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
	return r;
}
EXPORT_SYMBOL_GPL(bcsr_read);

void bcsr_write(enum bcsr_id reg, unsigned short val)
{
	unsigned long flags;

	spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
	__raw_writew(val, bcsr_regs[reg].raddr);
	wmb();
	spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
}
EXPORT_SYMBOL_GPL(bcsr_write);

void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set)
{
	unsigned short r;
	unsigned long flags;

	spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
	r = __raw_readw(bcsr_regs[reg].raddr);
	r &= ~clr;
	r |= set;
	__raw_writew(r, bcsr_regs[reg].raddr);
	wmb();
	spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
}
EXPORT_SYMBOL_GPL(bcsr_mod);
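
/*
 * Accessor usage sketch (illustrative only): bcsr_mod() performs a locked
 * read-modify-write, so callers can clear and set bits in one step.  The
 * register id and bit masks below are hypothetical placeholders:
 *
 *	u16 sw = bcsr_read(BCSR_SWITCHES);
 *	bcsr_mod(BCSR_LEDS, LED_BITS_TO_CLEAR, LED_BITS_TO_SET);
 */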

/*
 * DB1200/PB1200 CPLD IRQ muxer
 */
static void bcsr_csc_handler(struct irq_desc *d)
{
	unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
	struct irq_chip *chip = irq_desc_get_chip(d);

	chained_irq_enter(chip, d);
	/* dispatch the lowest pending cascade source */
	generic_handle_irq(bcsr_csc_base + __ffs(bisr));
	chained_irq_exit(chip, d);
}

static void bcsr_irq_mask(struct irq_data *d)
{
	unsigned short v = 1 << (d->irq - bcsr_csc_base);
	__raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
	wmb();
}

static void bcsr_irq_maskack(struct irq_data *d)
{
	unsigned short v = 1 << (d->irq - bcsr_csc_base);
	__raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
	__raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT);	/* ack */
	wmb();
}

static void bcsr_irq_unmask(struct irq_data *d)
{
	unsigned short v = 1 << (d->irq - bcsr_csc_base);
	__raw_writew(v, bcsr_virt + BCSR_REG_MASKSET);
	wmb();
}

static struct irq_chip bcsr_irq_type = {
	.name		= "CPLD",
	.irq_mask	= bcsr_irq_mask,
	.irq_mask_ack	= bcsr_irq_maskack,
	.irq_unmask	= bcsr_irq_unmask,
};

void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
{
	unsigned int irq;

	/* mask & enable & ack all */
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSET);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
	wmb();

	bcsr_csc_base = csc_start;

	for (irq = csc_start; irq <= csc_end; irq++)
		irq_set_chip_and_handler_name(irq, &bcsr_irq_type,
					      handle_level_irq, "level");

	irq_set_chained_handler(hook_irq, bcsr_csc_handler);
}
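
/*
 * Wiring sketch (illustrative only, not from the original file): board code
 * typically calls bcsr_init_irq() with the linux-irq range reserved for the
 * CPLD sources and the CPU irq line the CPLD's interrupt output is wired
 * to; a driver then requests one of the cascaded numbers as usual.  The
 * names and numbers below are hypothetical placeholders:
 *
 *	bcsr_init_irq(DB_FIRST_CASCADE_IRQ, DB_LAST_CASCADE_IRQ, gpio_irq);
 *	request_irq(DB_FIRST_CASCADE_IRQ + 3, my_handler, 0, "pcmcia", dev);
 */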