// SPDX-License-Identifier: GPL-2.0
/*
 * Library implementing the most common irq chip callback functions
 *
 * Copyright (C) 2011, Thomas Gleixner
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>

#include "internals.h"

static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);

/**
 * irq_gc_noop - NOOP function
 * @d: irq_data
 */
void irq_gc_noop(struct irq_data *d)
{
}

/**
 * irq_gc_mask_disable_reg - Mask chip via disable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_mask_disable_reg(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.disable);
        *ct->mask_cache &= ~mask;
        irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_set_bit(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        *ct->mask_cache |= mask;
        irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
        irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);

/**
 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_clr_bit(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        *ct->mask_cache &= ~mask;
        irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
        irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);

/**
 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.enable);
        *ct->mask_cache |= mask;
        irq_gc_unlock(gc);
}

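/*
 * Example (illustrative only, not used in this file): a driver whose
 * chip has a single mask register at a hypothetical offset MY_MASK,
 * where a set bit masks the line, could wire the helpers above as:
 *
 *      ct = gc->chip_types;
 *      ct->regs.mask = MY_MASK;
 *      ct->chip.irq_mask = irq_gc_mask_set_bit;
 *      ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 *
 * A chip with separate enable/disable registers would instead fill in
 * ct->regs.enable and ct->regs.disable and use irq_gc_unmask_enable_reg()
 * and irq_gc_mask_disable_reg().
 */
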
/**
 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
 * @d: irq_data
 */
void irq_gc_ack_set_bit(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.ack);
        irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);

/**
 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
 * @d: irq_data
 */
void irq_gc_ack_clr_bit(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = ~d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.ack);
        irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
 * @d: irq_data
 *
 * This generic implementation of the irq_mask_ack method is for chips
 * with separate enable/disable registers instead of a single mask
 * register and where a pending interrupt is acknowledged by setting a
 * bit.
 *
 * Note: This is the only permutation currently used. Similar generic
 * functions should be added here if other permutations are required.
 */
void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.disable);
        *ct->mask_cache &= ~mask;
        irq_reg_writel(gc, mask, ct->regs.ack);
        irq_gc_unlock(gc);
}

/**
 * irq_gc_eoi - EOI interrupt
 * @d: irq_data
 */
void irq_gc_eoi(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.eoi);
        irq_gc_unlock(gc);
}

/**
 * irq_gc_set_wake - Set/clr wake bit for an interrupt
 * @d: irq_data
 * @on: Indicates whether the wake bit should be set or cleared
 *
 * For chips where the wake from suspend functionality is not
 * configured in a separate register and the wakeup active state is
 * just stored in a bitmask.
 */
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        u32 mask = d->mask;

        if (!(mask & gc->wake_enabled))
                return -EINVAL;

        irq_gc_lock(gc);
        if (on)
                gc->wake_active |= mask;
        else
                gc->wake_active &= ~mask;
        irq_gc_unlock(gc);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_gc_set_wake);

static u32 irq_readl_be(void __iomem *addr)
{
        return ioread32be(addr);
}

static void irq_writel_be(u32 val, void __iomem *addr)
{
        iowrite32be(val, addr);
}

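/**
 * irq_init_generic_chip - Initialize a generic chip
 * @gc: Generic irq chip holding all data
 * @name: Name of the irq chip
 * @num_ct: Number of irq_chip_type instances associated with this chip
 * @irq_base: Interrupt base nr for this chip
 * @reg_base: Register base address (virtual)
 * @handler: Default flow handler associated with this chip
 */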
void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
                           int num_ct, unsigned int irq_base,
                           void __iomem *reg_base, irq_flow_handler_t handler)
{
        raw_spin_lock_init(&gc->lock);
        gc->num_ct = num_ct;
        gc->irq_base = irq_base;
        gc->reg_base = reg_base;
        gc->chip_types->chip.name = name;
        gc->chip_types->handler = handler;
}

/**
 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
 * @name: Name of the irq chip
 * @num_ct: Number of irq_chip_type instances associated with this chip
 * @irq_base: Interrupt base nr for this chip
 * @reg_base: Register base address (virtual)
 * @handler: Default flow handler associated with this chip
 *
 * Returns an initialized irq_chip_generic structure. The chip defaults
 * to the primary (index 0) irq_chip_type and @handler.
 */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
                       void __iomem *reg_base, irq_flow_handler_t handler)
{
        struct irq_chip_generic *gc;
        unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);

        gc = kzalloc(sz, GFP_KERNEL);
        if (gc) {
                irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
                                      handler);
        }
        return gc;
}
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);

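/*
 * Set up the mask cache pointers for each irq_chip_type: by default all
 * types share gc->mask_cache; with IRQ_GC_MASK_CACHE_PER_TYPE each type
 * uses its private cache. With IRQ_GC_INIT_MASK_CACHE the cache is
 * preloaded from the chip's mask register.
 */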
static void
irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
{
        struct irq_chip_type *ct = gc->chip_types;
        u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
        int i;

        for (i = 0; i < gc->num_ct; i++) {
                if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
                        mskptr = &ct[i].mask_cache_priv;
                        mskreg = ct[i].regs.mask;
                }
                ct[i].mask_cache = mskptr;
                if (flags & IRQ_GC_INIT_MASK_CACHE)
                        *mskptr = irq_reg_readl(gc, mskreg);
        }
}

/**
 * __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
 * @d: irq domain for which to allocate chips
 * @irqs_per_chip: Number of interrupts each chip handles (max 32)
 * @num_ct: Number of irq_chip_type instances associated with these chips
 * @name: Name of the irq chip
 * @handler: Default flow handler associated with these chips
 * @clr: IRQ_* bits to clear in the mapping function
 * @set: IRQ_* bits to set in the mapping function
 * @gcflags: Generic chip specific setup flags
 */
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
                                     int num_ct, const char *name,
                                     irq_flow_handler_t handler,
                                     unsigned int clr, unsigned int set,
                                     enum irq_gc_flags gcflags)
{
        struct irq_domain_chip_generic *dgc;
        struct irq_chip_generic *gc;
        int numchips, sz, i;
        unsigned long flags;
        void *tmp;

        if (d->gc)
                return -EBUSY;

        numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
        if (!numchips)
                return -EINVAL;

        /* Allocate a pointer, generic chip and chip types for each chip */
        sz = sizeof(*dgc) + numchips * sizeof(gc);
        sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));

        tmp = dgc = kzalloc(sz, GFP_KERNEL);
        if (!dgc)
                return -ENOMEM;
        dgc->irqs_per_chip = irqs_per_chip;
        dgc->num_chips = numchips;
        dgc->irq_flags_to_set = set;
        dgc->irq_flags_to_clear = clr;
        dgc->gc_flags = gcflags;
        d->gc = dgc;

        /* Calc pointer to the first generic chip */
        tmp += sizeof(*dgc) + numchips * sizeof(gc);
        for (i = 0; i < numchips; i++) {
                /* Store the pointer to the generic chip */
                dgc->gc[i] = gc = tmp;
                irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
                                      NULL, handler);

                gc->domain = d;
                if (gcflags & IRQ_GC_BE_IO) {
                        gc->reg_readl = &irq_readl_be;
                        gc->reg_writel = &irq_writel_be;
                }

                raw_spin_lock_irqsave(&gc_lock, flags);
                list_add_tail(&gc->list, &gc_list);
                raw_spin_unlock_irqrestore(&gc_lock, flags);
                /* Calc pointer to the next generic chip */
                tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);

static struct irq_chip_generic *
__irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
        struct irq_domain_chip_generic *dgc = d->gc;
        int idx;

        if (!dgc)
                return ERR_PTR(-ENODEV);
        idx = hw_irq / dgc->irqs_per_chip;
        if (idx >= dgc->num_chips)
                return ERR_PTR(-EINVAL);
        return dgc->gc[idx];
}

/**
 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
 * @d: irq domain pointer
 * @hw_irq: Hardware interrupt number
 */
struct irq_chip_generic *
irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
        struct irq_chip_generic *gc = __irq_get_domain_generic_chip(d, hw_irq);

        return !IS_ERR(gc) ? gc : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);

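/*
 * Example (illustrative only): a typical driver flow for the domain
 * based interface, using hypothetical names and register offsets:
 *
 *      domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops, NULL);
 *      ret = irq_alloc_domain_generic_chips(domain, 32, 1, "MYINTC",
 *                                           handle_level_irq, 0, 0,
 *                                           IRQ_GC_INIT_MASK_CACHE);
 *      gc = irq_get_domain_generic_chip(domain, 0);
 *      gc->reg_base = reg_base;
 *      gc->chip_types[0].regs.mask = MY_MASK;
 *      gc->chip_types[0].regs.ack = MY_ACK;
 *      gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
 *      gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
 *      gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
 */
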
/*
 * Separate lockdep classes for interrupt chips which can nest the
 * irq_desc lock and the request mutex.
 */
static struct lock_class_key irq_nested_lock_class;
static struct lock_class_key irq_nested_request_class;

/*
 * irq_map_generic_chip - Map a generic chip for an irq domain
 */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
                         irq_hw_number_t hw_irq)
{
        struct irq_data *data = irq_domain_get_irq_data(d, virq);
        struct irq_domain_chip_generic *dgc = d->gc;
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
        struct irq_chip *chip;
        unsigned long flags;
        int idx;

        gc = __irq_get_domain_generic_chip(d, hw_irq);
        if (IS_ERR(gc))
                return PTR_ERR(gc);

        idx = hw_irq % dgc->irqs_per_chip;

        if (test_bit(idx, &gc->unused))
                return -ENOTSUPP;

        if (test_bit(idx, &gc->installed))
                return -EBUSY;

        ct = gc->chip_types;
        chip = &ct->chip;

        /* We only init the cache for the first mapping of a generic chip */
        if (!gc->installed) {
                raw_spin_lock_irqsave(&gc->lock, flags);
                irq_gc_init_mask_cache(gc, dgc->gc_flags);
                raw_spin_unlock_irqrestore(&gc->lock, flags);
        }

        /* Mark the interrupt as installed */
        set_bit(idx, &gc->installed);

        if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
                irq_set_lockdep_class(virq, &irq_nested_lock_class,
                                      &irq_nested_request_class);

        if (chip->irq_calc_mask)
                chip->irq_calc_mask(data);
        else
                data->mask = 1 << idx;

        irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL);
        irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
        return 0;
}

static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
{
        struct irq_data *data = irq_domain_get_irq_data(d, virq);
        struct irq_domain_chip_generic *dgc = d->gc;
        unsigned int hw_irq = data->hwirq;
        struct irq_chip_generic *gc;
        int irq_idx;

        gc = irq_get_domain_generic_chip(d, hw_irq);
        if (!gc)
                return;

        irq_idx = hw_irq % dgc->irqs_per_chip;

        clear_bit(irq_idx, &gc->installed);
        irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
                            NULL);
}

struct irq_domain_ops irq_generic_chip_ops = {
        .map = irq_map_generic_chip,
        .unmap = irq_unmap_generic_chip,
        .xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);

/**
 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
 * @flags: Flags for initialization
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
 * initializes all interrupts to the primary irq_chip_type and its
 * associated handler.
 */
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
                            enum irq_gc_flags flags, unsigned int clr,
                            unsigned int set)
{
        struct irq_chip_type *ct = gc->chip_types;
        struct irq_chip *chip = &ct->chip;
        unsigned int i;

        raw_spin_lock(&gc_lock);
        list_add_tail(&gc->list, &gc_list);
        raw_spin_unlock(&gc_lock);

        irq_gc_init_mask_cache(gc, flags);

        for (i = gc->irq_base; msk; msk >>= 1, i++) {
                if (!(msk & 0x01))
                        continue;

                if (flags & IRQ_GC_INIT_NESTED_LOCK)
                        irq_set_lockdep_class(i, &irq_nested_lock_class,
                                              &irq_nested_request_class);

                if (!(flags & IRQ_GC_NO_MASK)) {
                        struct irq_data *d = irq_get_irq_data(i);

                        if (chip->irq_calc_mask)
                                chip->irq_calc_mask(d);
                        else
                                d->mask = 1 << (i - gc->irq_base);
                }
                irq_set_chip_and_handler(i, chip, ct->handler);
                irq_set_chip_data(i, gc);
                irq_modify_status(i, clr, set);
        }
        gc->irq_cnt = i - gc->irq_base;
}
EXPORT_SYMBOL_GPL(irq_setup_generic_chip);

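/*
 * Example (illustrative only): the legacy, non-domain interface for a
 * bank of 32 Linux interrupts starting at a hypothetical MY_IRQ_BASE:
 *
 *      gc = irq_alloc_generic_chip("MYINTC", 1, MY_IRQ_BASE, reg_base,
 *                                  handle_level_irq);
 *      gc->chip_types[0].regs.mask = MY_MASK;
 *      gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
 *      gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
 *      irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
 *                             IRQ_NOREQUEST, 0);
 */
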
/**
 * irq_setup_alt_chip - Switch to alternative chip
 * @d: irq_data for this interrupt
 * @type: Flow type to be initialized
 *
 * Only to be called from chip->irq_set_type() callbacks.
 */
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = gc->chip_types;
        unsigned int i;

        for (i = 0; i < gc->num_ct; i++, ct++) {
                if (ct->type & type) {
                        d->chip = &ct->chip;
                        irq_data_to_desc(d)->handle_irq = ct->handler;
                        return 0;
                }
        }
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_setup_alt_chip);

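/*
 * Example (illustrative only): a driver providing one irq_chip_type for
 * level and one for edge handling (with ct->type set to the matching
 * IRQ_TYPE_* bits) might switch between them from its hypothetical
 * irq_set_type() callback:
 *
 *      static int my_irq_set_type(struct irq_data *d, unsigned int type)
 *      {
 *              ... program the trigger type into the hardware ...
 *              return irq_setup_alt_chip(d, type);
 *      }
 */
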
/**
 * irq_remove_generic_chip - Remove a chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to remove relative to gc->irq_base
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Remove up to 32 interrupts starting from gc->irq_base.
 */
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
                             unsigned int clr, unsigned int set)
{
        unsigned int i = gc->irq_base;

        raw_spin_lock(&gc_lock);
        list_del(&gc->list);
        raw_spin_unlock(&gc_lock);

        for (; msk; msk >>= 1, i++) {
                if (!(msk & 0x01))
                        continue;

                /* Remove handler first. That will mask the irq line */
                irq_set_handler(i, NULL);
                irq_set_chip(i, &no_irq_chip);
                irq_set_chip_data(i, NULL);
                irq_modify_status(i, clr, set);
        }
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);

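/*
 * Return the irq_data of one interrupt belonging to @gc for the PM
 * callbacks below: gc->irq_base for the legacy interface, the first
 * installed interrupt for domain based chips.
 */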
static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
{
        unsigned int virq;

        if (!gc->domain)
                return irq_get_irq_data(gc->irq_base);

        /*
         * We don't know which of the irqs have actually been
         * installed. Use the first one.
         */
        if (!gc->installed)
                return NULL;

        virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
        return virq ? irq_get_irq_data(virq) : NULL;
}

#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
        struct irq_chip_generic *gc;

        list_for_each_entry(gc, &gc_list, list) {
                struct irq_chip_type *ct = gc->chip_types;

                if (ct->chip.irq_suspend) {
                        struct irq_data *data = irq_gc_get_irq_data(gc);

                        if (data)
                                ct->chip.irq_suspend(data);
                }

                if (gc->suspend)
                        gc->suspend(gc);
        }
        return 0;
}

static void irq_gc_resume(void)
{
        struct irq_chip_generic *gc;

        list_for_each_entry(gc, &gc_list, list) {
                struct irq_chip_type *ct = gc->chip_types;

                if (gc->resume)
                        gc->resume(gc);

                if (ct->chip.irq_resume) {
                        struct irq_data *data = irq_gc_get_irq_data(gc);

                        if (data)
                                ct->chip.irq_resume(data);
                }
        }
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif

static void irq_gc_shutdown(void)
{
        struct irq_chip_generic *gc;

        list_for_each_entry(gc, &gc_list, list) {
                struct irq_chip_type *ct = gc->chip_types;

                if (ct->chip.irq_pm_shutdown) {
                        struct irq_data *data = irq_gc_get_irq_data(gc);

                        if (data)
                                ct->chip.irq_pm_shutdown(data);
                }
        }
}

static struct syscore_ops irq_gc_syscore_ops = {
        .suspend = irq_gc_suspend,
        .resume = irq_gc_resume,
        .shutdown = irq_gc_shutdown,
};

static int __init irq_gc_init_ops(void)
{
        register_syscore_ops(&irq_gc_syscore_ops);
        return 0;
}
device_initcall(irq_gc_init_ops);