// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2014-2018 MediaTek Inc.

/*
 * Library for MediaTek External Interrupt Support
 *
 * Author: Maoguang Meng <maoguang.meng@mediatek.com>
 *	   Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include "mtk-eint.h"

#define MTK_EINT_EDGE_SENSITIVE		0
#define MTK_EINT_LEVEL_SENSITIVE	1
#define MTK_EINT_DBNC_SET_DBNC_BITS	4
#define MTK_EINT_DBNC_RST_BIT		(0x1 << 1)
#define MTK_EINT_DBNC_SET_EN		(0x1 << 0)

static const struct mtk_eint_regs mtk_generic_eint_regs = {
	.stat      = 0x000,
	.ack       = 0x040,
	.mask      = 0x080,
	.mask_set  = 0x0c0,
	.mask_clr  = 0x100,
	.sens      = 0x140,
	.sens_set  = 0x180,
	.sens_clr  = 0x1c0,
	.soft      = 0x200,
	.soft_set  = 0x240,
	.soft_clr  = 0x280,
	.pol       = 0x300,
	.pol_set   = 0x340,
	.pol_clr   = 0x380,
	.dom_en    = 0x400,
	.dbnc_ctrl = 0x500,
	.dbnc_set  = 0x600,
	.dbnc_clr  = 0x700,
};

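/*
 * EINT control registers are banked per 32 pins: consecutive 32-bit
 * registers, 4 bytes apart, each cover one port of 32 EINTs.  EINTs
 * numbered at or above ap_num belong to a separate block whose banks
 * restart at the same base offset, hence the eint_base subtraction.
 */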
static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
					 unsigned int eint_num,
					 unsigned int offset)
{
	unsigned int eint_base = 0;
	void __iomem *reg;

	if (eint_num >= eint->hw->ap_num)
		eint_base = eint->hw->ap_num;

	reg = eint->base + offset + ((eint_num - eint_base) / 32) * 4;

	return reg;
}

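/*
 * Hardware debounce is only available on the first db_cnt EINTs, and
 * only while the pin is configured as level sensitive.
 */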
static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
					     unsigned int eint_num)
{
	unsigned int sens;
	unsigned int bit = BIT(eint_num % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->sens);

	if (readl(reg) & bit)
		sens = MTK_EINT_LEVEL_SENSITIVE;
	else
		sens = MTK_EINT_EDGE_SENSITIVE;

	return eint_num < eint->hw->db_cnt && sens != MTK_EINT_EDGE_SENSITIVE;
}

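/*
 * The EINT hardware has no true dual-edge trigger mode.  Both-edge
 * interrupts are emulated by programming the polarity opposite to the
 * current GPIO level, so the next transition always fires.  The loop
 * re-checks the level after each polarity write in case the line
 * toggled in between, and returns the level seen before the last write.
 */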
static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
{
	int start_level, curr_level;
	unsigned int reg_offset;
	u32 mask = BIT(hwirq & 0x1f);
	u32 port = (hwirq >> 5) & eint->hw->port_mask;
	void __iomem *reg = eint->base + (port << 2);

	curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);

	do {
		start_level = curr_level;
		if (start_level)
			reg_offset = eint->regs->pol_clr;
		else
			reg_offset = eint->regs->pol_set;
		writel(mask, reg + reg_offset);

		curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl,
							      hwirq);
	} while (start_level != curr_level);

	return start_level;
}

static void mtk_eint_mask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_set);

	eint->cur_mask[d->hwirq >> 5] &= ~mask;

	writel(mask, reg);
}

static void mtk_eint_unmask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_clr);

	eint->cur_mask[d->hwirq >> 5] |= mask;

	writel(mask, reg);

	if (eint->dual_edge[d->hwirq])
		mtk_eint_flip_edge(eint, d->hwirq);
}

static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
				      unsigned int eint_num)
{
	unsigned int bit = BIT(eint_num % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->mask);

	return !!(readl(reg) & bit);
}

static void mtk_eint_ack(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->ack);

	writel(mask, reg);
}

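/*
 * Program the trigger type: the polarity bank selects rising/high
 * versus falling/low, and the sensitivity bank selects edge versus
 * level.  IRQ_TYPE_EDGE_BOTH is emulated in software via dual_edge[]
 * and mtk_eint_flip_edge(), since the hardware cannot trigger on both
 * edges at once.
 */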
static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg;

	if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
	    ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
		dev_err(eint->dev,
			"Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
			d->irq, d->hwirq, type);
		return -EINVAL;
	}

	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
		eint->dual_edge[d->hwirq] = 1;
	else
		eint->dual_edge[d->hwirq] = 0;

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_set);
		writel(mask, reg);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_set);
		writel(mask, reg);
	}

	if (eint->dual_edge[d->hwirq])
		mtk_eint_flip_edge(eint, d->hwirq);

	return 0;
}

static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	int shift = d->hwirq & 0x1f;
	int reg = d->hwirq >> 5;

	if (on)
		eint->wake_mask[reg] |= BIT(shift);
	else
		eint->wake_mask[reg] &= ~BIT(shift);

	return 0;
}

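/*
 * Apply a saved per-port mask snapshot: bits set in buf are unmasked
 * via mask_clr, cleared bits are masked via mask_set.  Used to switch
 * between the runtime mask (cur_mask) and the wake mask across
 * suspend/resume.
 */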
static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
				     void __iomem *base, u32 *buf)
{
	int port;
	void __iomem *reg;

	for (port = 0; port < eint->hw->ports; port++) {
		reg = base + (port << 2);
		writel_relaxed(~buf[port], reg + eint->regs->mask_set);
		writel_relaxed(buf[port], reg + eint->regs->mask_clr);
	}
}

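/*
 * Claim the pin backing this EINT: translate the hwirq to its GPIO,
 * lock the GPIO for IRQ use, and switch the pin mux to EINT mode.
 */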
static int mtk_eint_irq_request_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;
	int err;

	err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq,
					   &gpio_n, &gpio_c);
	if (err < 0) {
		dev_err(eint->dev, "Cannot find pin\n");
		return err;
	}

	err = gpiochip_lock_as_irq(gpio_c, gpio_n);
	if (err < 0) {
		dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n",
			irqd_to_hwirq(d));
		return err;
	}

	err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq);
	if (err < 0) {
		dev_err(eint->dev, "Cannot set pin to EINT mode\n");
		return err;
	}

	return 0;
}

static void mtk_eint_irq_release_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;

	eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n,
				     &gpio_c);

	gpiochip_unlock_as_irq(gpio_c, gpio_n);
}

static struct irq_chip mtk_eint_irq_chip = {
	.name = "mt-eint",
	.irq_disable = mtk_eint_mask,
	.irq_mask = mtk_eint_mask,
	.irq_unmask = mtk_eint_unmask,
	.irq_ack = mtk_eint_ack,
	.irq_set_type = mtk_eint_set_type,
	.irq_set_wake = mtk_eint_irq_set_wake,
	.irq_request_resources = mtk_eint_irq_request_resources,
	.irq_release_resources = mtk_eint_irq_release_resources,
};

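/*
 * Enable the interrupt domain (dom_en) for every EINT, one 32-pin
 * port register at a time.
 */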
static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
{
	void __iomem *reg = eint->base + eint->regs->dom_en;
	unsigned int i;

	for (i = 0; i < eint->hw->ap_num; i += 32) {
		writel(0xffffffff, reg);
		reg += 4;
	}

	return 0;
}

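/*
 * If hardware debounce is enabled on this EINT, reset its debounce
 * counter after the interrupt fires so the next event is filtered
 * with a full debounce period again.
 */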
static inline void
mtk_eint_debounce_process(struct mtk_eint *eint, int index)
{
	unsigned int rst, ctrl_offset;
	unsigned int bit, dbnc;

	ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_ctrl;
	dbnc = readl(eint->base + ctrl_offset);
	bit = MTK_EINT_DBNC_SET_EN << ((index % 4) * 8);
	if ((bit & dbnc) > 0) {
		ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_set;
		rst = MTK_EINT_DBNC_RST_BIT << ((index % 4) * 8);
		writel(rst, eint->base + ctrl_offset);
	}
}

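/*
 * Chained handler for the upstream EINT interrupt: walk every 32-pin
 * status port, dispatch each pending EINT to its mapped virq, and do
 * the bookkeeping that dual-edge emulation and hardware debounce
 * require around the dispatch.
 */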
static void mtk_eint_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mtk_eint *eint = irq_desc_get_handler_data(desc);
	unsigned int status, eint_num;
	int offset, mask_offset, index, virq;
	void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
	int dual_edge, start_level, curr_level;

	chained_irq_enter(chip, desc);
	for (eint_num = 0; eint_num < eint->hw->ap_num; eint_num += 32,
	     reg += 4) {
		status = readl(reg);
		while (status) {
			offset = __ffs(status);
			mask_offset = eint_num >> 5;
			index = eint_num + offset;
			virq = irq_find_mapping(eint->domain, index);
			status &= ~BIT(offset);

			/*
			 * If we get an interrupt on a pin that was only
			 * required for wake (but no real interrupt
			 * requested), mask the interrupt (as
			 * mtk_eint_resume would do anyway later in the
			 * resume sequence).
			 */
			if (eint->wake_mask[mask_offset] & BIT(offset) &&
			    !(eint->cur_mask[mask_offset] & BIT(offset))) {
				writel_relaxed(BIT(offset), reg -
					       eint->regs->stat +
					       eint->regs->mask_set);
			}

			dual_edge = eint->dual_edge[index];
			if (dual_edge) {
				/*
				 * Clear the soft-irq in case we raised it
				 * last time.
				 */
				writel(BIT(offset), reg - eint->regs->stat +
				       eint->regs->soft_clr);

				start_level =
				eint->gpio_xlate->get_gpio_state(eint->pctl,
								 index);
			}

			generic_handle_irq(virq);

			if (dual_edge) {
				curr_level = mtk_eint_flip_edge(eint, index);

				/*
				 * If the level changed, we may have missed
				 * one edge interrupt; raise it through the
				 * soft-irq.
				 */
				if (start_level != curr_level)
					writel(BIT(offset), reg -
					       eint->regs->stat +
					       eint->regs->soft_set);
			}

			if (index < eint->hw->db_cnt)
				mtk_eint_debounce_process(eint, index);
		}
	}
	chained_irq_exit(chip, desc);
}

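/*
 * Across suspend, only wake-enabled EINTs stay unmasked; resume
 * restores the runtime mask state tracked in cur_mask.
 */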
int mtk_eint_do_suspend(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_suspend);

int mtk_eint_do_resume(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->cur_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_resume);

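/*
 * Debounce periods are encoded as an index into debounce_time[] (values
 * in microseconds; the request is rounded up to the nearest supported
 * value).  Each DBNC register packs four EINTs at 8 bits per pin: the
 * enable bit, the reset bit, and the time index starting at bit
 * MTK_EINT_DBNC_SET_DBNC_BITS.
 */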
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
			  unsigned int debounce)
{
	int virq, eint_offset;
	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
		     dbnc;
	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000,
						     64000, 128000, 256000};
	struct irq_data *d;

	virq = irq_find_mapping(eint->domain, eint_num);
	eint_offset = (eint_num % 4) * 8;
	d = irq_get_irq_data(virq);

	set_offset = (eint_num / 4) * 4 + eint->regs->dbnc_set;
	clr_offset = (eint_num / 4) * 4 + eint->regs->dbnc_clr;

	if (!mtk_eint_can_en_debounce(eint, eint_num))
		return -EINVAL;

	dbnc = ARRAY_SIZE(debounce_time);
	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
		if (debounce <= debounce_time[i]) {
			dbnc = i;
			break;
		}
	}

	if (!mtk_eint_get_mask(eint, eint_num)) {
		mtk_eint_mask(d);
		unmask = 1;
	} else {
		unmask = 0;
	}

	clr_bit = 0xff << eint_offset;
	writel(clr_bit, eint->base + clr_offset);

	bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
		eint_offset;
	rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
	writel(rst | bit, eint->base + set_offset);

	/*
	 * Delay a while (more than 2T) to wait for the hw debounce counter
	 * reset to work correctly.
	 */
	udelay(1);
	if (unmask)
		mtk_eint_unmask(d);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_set_debounce);

int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
{
	int irq;

	irq = irq_find_mapping(eint->domain, eint_n);
	if (!irq)
		return -EINVAL;

	return irq;
}
EXPORT_SYMBOL_GPL(mtk_eint_find_irq);

int mtk_eint_do_init(struct mtk_eint *eint)
{
	int i;

	/* If the client didn't assign a specific register layout, use the generic one */
	if (!eint->regs)
		eint->regs = &mtk_generic_eint_regs;

	eint->wake_mask = devm_kcalloc(eint->dev, eint->hw->ports,
				       sizeof(*eint->wake_mask), GFP_KERNEL);
	if (!eint->wake_mask)
		return -ENOMEM;

	eint->cur_mask = devm_kcalloc(eint->dev, eint->hw->ports,
				      sizeof(*eint->cur_mask), GFP_KERNEL);
	if (!eint->cur_mask)
		return -ENOMEM;

	eint->dual_edge = devm_kcalloc(eint->dev, eint->hw->ap_num,
				       sizeof(int), GFP_KERNEL);
	if (!eint->dual_edge)
		return -ENOMEM;

	eint->domain = irq_domain_add_linear(eint->dev->of_node,
					     eint->hw->ap_num,
					     &irq_domain_simple_ops, NULL);
	if (!eint->domain)
		return -ENOMEM;

	mtk_eint_hw_init(eint);
	for (i = 0; i < eint->hw->ap_num; i++) {
		int virq = irq_create_mapping(eint->domain, i);

		irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
					 handle_level_irq);
		irq_set_chip_data(virq, eint);
	}

	irq_set_chained_handler_and_data(eint->irq, mtk_eint_irq_handler,
					 eint);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_init);

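/*
 * Typical usage from a MediaTek pinctrl driver (a minimal sketch; the
 * probe details and the my_* names are illustrative assumptions, only
 * the mtk_eint_* calls and struct fields come from this library):
 *
 *	eint->dev = &pdev->dev;
 *	eint->base = devm_platform_ioremap_resource(pdev, 0);
 *	eint->irq = platform_get_irq(pdev, 0);
 *	eint->hw = &my_soc_eint_hw;	    // per-SoC ap_num, db_cnt, ports...
 *	eint->gpio_xlate = &my_gpio_xlate;  // GPIO <-> EINT translation callbacks
 *	eint->pctl = my_pctl;
 *	err = mtk_eint_do_init(eint);
 *
 * Afterwards, per-pin services are available, e.g.:
 *
 *	mtk_eint_set_debounce(eint, eint_n, 16000);	// in microseconds
 *	virq = mtk_eint_find_irq(eint, eint_n);
 */
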
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek EINT Driver");