/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2016 Cavium, Inc.
 */

#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>
#include <asm/octeon/cvmx-ciu3-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);

static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);
#define CIU3_MBOX_PER_CORE 10

/*
 * The 8 most significant bits of the intsn identify the interrupt major block.
 * Each major block might use its own interrupt domain. Thus 256 domains are
 * needed.
 */
#define MAX_CIU3_DOMAINS 256
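
/*
 * Added illustrative note (not from the original source): assuming the
 * 20-bit intsn encoding used by the CIU3, the 8 most significant bits are
 * intsn >> 12, so a hypothetical intsn of 0x3c005 would fall in major
 * block 0x3c and be handled by ciu3_info->domain[0x3c], translated
 * through intsn2hw[0x3c].
 */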

typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);

/* Information for each ciu3 in the system */
struct octeon_ciu3_info {
	u64 ciu3_addr;
	int node;
	struct irq_domain *domain[MAX_CIU3_DOMAINS];
	octeon_ciu3_intsn2hw_t intsn2hw[MAX_CIU3_DOMAINS];
};

/* Each ciu3 in the system uses its own data (one ciu3 per node) */
static struct octeon_ciu3_info *octeon_ciu3_info_per_node[4];

struct octeon_irq_ciu_domain_data {
	int num_sum; /* number of sum registers (2 or 3). */
};

/* Register offsets from ciu3_addr */
#define CIU3_CONST 0x220
#define CIU3_IDT_CTL(_idt) ((_idt) * 8 + 0x110000)
#define CIU3_IDT_PP(_idt, _idx) ((_idt) * 32 + (_idx) * 8 + 0x120000)
#define CIU3_IDT_IO(_idt) ((_idt) * 8 + 0x130000)
#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
#define CIU3_DEST_IO_INT(_io) ((_io) * 8 + 0x210000)
#define CIU3_ISC_CTL(_intsn) ((_intsn) * 8 + 0x80000000)
#define CIU3_ISC_W1C(_intsn) ((_intsn) * 8 + 0x90000000)
#define CIU3_ISC_W1S(_intsn) ((_intsn) * 8 + 0xa0000000)
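
/*
 * Added example (values hypothetical): the interrupt source control
 * register of, say, intsn 0x12 on a given CIU3 would be reached with
 * something like
 *
 *	cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(0x12));
 *
 * i.e. at byte offset 0x80000000 + 0x12 * 8 from ciu3_addr.
 */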

static __read_mostly int octeon_irq_ciu_to_irq[8][64];

struct octeon_ciu_chip_data {
	union {
		struct { /* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct { /* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
		};
	};
	int gpio_line;
	int current_cpu; /* Next CPU expected to take this irq */
	int ciu_node; /* NUMA node number of the CIU */
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);

	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;
	return 0;
}

static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	irq_set_chip_data(irq, NULL);
	kfree(cd);
}

static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					int irq, int line, int bit)
{
	return irq_domain_associate(domain, irq, line << 6 | bit);
}
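
/*
 * Added note: the hardware irq number passed to irq_domain_associate()
 * above packs the sum line in bits 6 and up and the bit position within
 * that line in bits 0-5, i.e. hwirq = line * 64 + bit, matching the
 * 8 x 64 shape of octeon_irq_ciu_to_irq[][].
 */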

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);

}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}
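
/*
 * Added note: the core irq_chip below uses the slow "bus lock" pattern.
 * irq_enable/irq_disable only record the desired state; the CP0 Status
 * IM bit is actually flipped on every CPU from
 * octeon_irq_core_bus_sync_unlock() via on_each_cpu(), with
 * core_irq_mutex (taken in irq_bus_lock) serializing the update, so the
 * per-core enable mask stays identical across all cores.
 */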

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}
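
/*
 * Added note: irqs OCTEON_IRQ_SW0 + 0..7 cover the eight MIPS core
 * interrupt lines; cd->bit = i selects Status/Cause bit 8 + i (the
 * 0x100 << bit masks used above). cd->bit values 0 and 1 are the two
 * software interrupts (Cause.IP0/IP1), which is why
 * octeon_irq_core_ack() also clears Cause for bit < 2.
 */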

static int next_cpu_for_irq(struct irq_data *data)
{

#ifdef CONFIG_SMP
	int cpu;
	struct cpumask *mask = irq_data_get_affinity_mask(data);
	int weight = cpumask_weight(mask);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, mask);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(mask);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}
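
/*
 * Added example: with an affinity mask of, say, CPUs {1, 2, 5} and
 * cd->current_cpu == 2, the loop above returns 5, then on the next call
 * wraps (cpu = -1; continue) and returns 1, skipping any CPU in the mask
 * that is not currently online. This is what spreads successive enables
 * of the same irq across the affinity set.
 */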

static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
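
/*
 * Added note: the "_v2" variants above and below rely on the set/clear
 * (W1S/W1C) views of the enable registers, so a single-bit mask write is
 * enough and no per-CPU spinlock or full mirror write-back is needed,
 * unlike octeon_irq_ciu_enable()/octeon_irq_ciu_disable_all() for the
 * original CIU, which must rewrite the whole enable word under
 * octeon_irq_ciu_spinlock.
 */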

/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}

/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}

static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}

static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
	int cpu;
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd->bit,
				  &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd->bit,
				  &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);

	if (t & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	struct octeon_ciu_chip_data *cd;
	u32 t = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 nS glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}
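
/*
 * Added example: a request for IRQ_TYPE_EDGE_FALLING is programmed above
 * as int_type = 1 (edge) with rx_xor = 1 (inverted input), while
 * IRQ_TYPE_LEVEL_HIGH leaves both at 0; fil_cnt = 7 with fil_sel = 3
 * selects the glitch filter noted in the comment.
 */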

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	if (t & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}

#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;
	struct cpumask *mask = irq_data_get_affinity_mask(data);

	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * It has multi CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;


	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (cpumask_test_cpu(cpu, dest) && enable_one) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) enable_one = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) set_bit(cd->bit, pen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) clear_bit(cd->bit, pen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) const struct cpumask *dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (!enable_one)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) mask = 1ull << cd->bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) int index = octeon_coreid_for_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (cpumask_test_cpu(cpu, dest) && enable_one) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) enable_one = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static unsigned int edge_startup(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* ack any pending edge-irq at startup, so there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * an _edge_ to fire on when the event reappears.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) data->chip->irq_ack(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) data->chip->irq_enable(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * Newer octeon chips have support for lockless CIU operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static struct irq_chip octeon_irq_chip_ciu_v2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) .name = "CIU",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) .irq_enable = octeon_irq_ciu_enable_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) .irq_disable = octeon_irq_ciu_disable_all_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) .irq_mask = octeon_irq_ciu_disable_local_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) .irq_unmask = octeon_irq_ciu_enable_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) .name = "CIU",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) .irq_enable = octeon_irq_ciu_enable_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) .irq_disable = octeon_irq_ciu_disable_all_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) .irq_ack = octeon_irq_ciu_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) .irq_mask = octeon_irq_ciu_disable_local_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) .irq_unmask = octeon_irq_ciu_enable_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * The SUM2/EN2 sources likewise use the lockless W1S/W1C enable registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static struct irq_chip octeon_irq_chip_ciu_sum2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) .name = "CIU",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) .irq_enable = octeon_irq_ciu_enable_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) .irq_disable = octeon_irq_ciu_disable_all_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) .irq_mask = octeon_irq_ciu_disable_local_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) .irq_unmask = octeon_irq_ciu_enable_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) .name = "CIU",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) .irq_enable = octeon_irq_ciu_enable_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) .irq_disable = octeon_irq_ciu_disable_all_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .irq_ack = octeon_irq_ciu_ack_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) .irq_mask = octeon_irq_ciu_disable_local_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) .irq_unmask = octeon_irq_ciu_enable_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) static struct irq_chip octeon_irq_chip_ciu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) .name = "CIU",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) .irq_enable = octeon_irq_ciu_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) .irq_disable = octeon_irq_ciu_disable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) .irq_mask = octeon_irq_ciu_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) .irq_unmask = octeon_irq_ciu_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) .irq_set_affinity = octeon_irq_ciu_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) static struct irq_chip octeon_irq_chip_ciu_edge = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) .name = "CIU",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) .irq_enable = octeon_irq_ciu_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) .irq_disable = octeon_irq_ciu_disable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) .irq_ack = octeon_irq_ciu_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) .irq_mask = octeon_irq_ciu_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) .irq_unmask = octeon_irq_ciu_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) .irq_set_affinity = octeon_irq_ciu_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /* The mbox versions don't do any affinity or round-robin. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) .name = "CIU-M",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) .irq_enable = octeon_irq_ciu_enable_all_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) .irq_disable = octeon_irq_ciu_disable_all_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) .irq_ack = octeon_irq_ciu_disable_local_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) .irq_eoi = octeon_irq_ciu_enable_local_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) .irq_cpu_online = octeon_irq_ciu_enable_local_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) .irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) .flags = IRQCHIP_ONOFFLINE_ENABLED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static struct irq_chip octeon_irq_chip_ciu_mbox = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) .name = "CIU-M",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) .irq_enable = octeon_irq_ciu_enable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) .irq_disable = octeon_irq_ciu_disable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) .irq_ack = octeon_irq_ciu_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) .irq_eoi = octeon_irq_ciu_enable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) .irq_cpu_online = octeon_irq_ciu_enable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) .irq_cpu_offline = octeon_irq_ciu_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) .flags = IRQCHIP_ONOFFLINE_ENABLED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) .name = "CIU-GPIO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) .irq_enable = octeon_irq_ciu_enable_gpio_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) .irq_disable = octeon_irq_ciu_disable_gpio_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) .irq_ack = octeon_irq_ciu_gpio_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) .irq_mask = octeon_irq_ciu_disable_local_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) .irq_unmask = octeon_irq_ciu_enable_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) .irq_set_type = octeon_irq_ciu_gpio_set_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) .flags = IRQCHIP_SET_TYPE_MASKED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static struct irq_chip octeon_irq_chip_ciu_gpio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) .name = "CIU-GPIO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) .irq_enable = octeon_irq_ciu_enable_gpio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) .irq_disable = octeon_irq_ciu_disable_gpio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) .irq_mask = octeon_irq_ciu_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) .irq_unmask = octeon_irq_ciu_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) .irq_ack = octeon_irq_ciu_gpio_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) .irq_set_type = octeon_irq_ciu_gpio_set_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) .irq_set_affinity = octeon_irq_ciu_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) .flags = IRQCHIP_SET_TYPE_MASKED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * Watchdog interrupts are special. They are associated with a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * core, so we hardwire the affinity to that core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static void octeon_irq_ciu_wd_enable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) unsigned long *pen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int cpu = octeon_cpu_for_coreid(coreid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) raw_spin_lock_irqsave(lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) __set_bit(coreid, pen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * the irq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) raw_spin_unlock_irqrestore(lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * Watchdog interrupts are special. They are associated with a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * core, so we hardwire the affinity to that core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) int coreid = data->irq - OCTEON_IRQ_WDOG0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) int cpu = octeon_cpu_for_coreid(coreid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) .name = "CIU-W",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) .irq_enable = octeon_irq_ciu1_wd_enable_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) .irq_disable = octeon_irq_ciu_disable_all_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) .irq_mask = octeon_irq_ciu_disable_local_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) .irq_unmask = octeon_irq_ciu_enable_local_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static struct irq_chip octeon_irq_chip_ciu_wd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .name = "CIU-W",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) .irq_enable = octeon_irq_ciu_wd_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) .irq_disable = octeon_irq_ciu_disable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) .irq_mask = octeon_irq_ciu_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) .irq_unmask = octeon_irq_ciu_enable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
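/*
 * Identify the edge sensitive CIU sources: the GMX DRP, IPD_DRP, timer
 * and MPI bits on line 0 and the PTP bit on line 1.  Everything else is
 * treated as level triggered.
 */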
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) bool edge = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (line == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) switch (bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) case 48 ... 49: /* GMX DRP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) case 50: /* IPD_DRP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) case 52 ... 55: /* Timers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) case 58: /* MPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) edge = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) else /* line == 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) switch (bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) case 47: /* PTP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) edge = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct octeon_irq_gpio_domain_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) unsigned int base_hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
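/*
 * Translate a two-cell GPIO interrupt specifier: cell 0 is the pin
 * number (0-15), cell 1 is the trigger type using the usual one-hot
 * encoding (1 = rising, 2 = falling, 4 = level high, 8 = level low).
 */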
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static int octeon_irq_gpio_xlat(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct device_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) const u32 *intspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) unsigned int intsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) unsigned long *out_hwirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) unsigned int *out_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) unsigned int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) unsigned int pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) unsigned int trigger;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (irq_domain_get_of_node(d) != node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (intsize < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) pin = intspec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (pin >= 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) trigger = intspec[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) switch (trigger) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) type = IRQ_TYPE_EDGE_RISING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) type = IRQ_TYPE_EDGE_FALLING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) type = IRQ_TYPE_LEVEL_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) type = IRQ_TYPE_LEVEL_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) pr_err("Error: (%pOFn) Invalid irq trigger specification: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) trigger);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) type = IRQ_TYPE_LEVEL_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) *out_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) *out_hwirq = pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
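/*
 * Translate a CIU interrupt specifier into a hwirq number.  The low six
 * bits hold the bit position and the sum line is shifted above them,
 * e.g. <1 47> (the PTP source) becomes hwirq (1 << 6) | 47.
 */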
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static int octeon_irq_ciu_xlat(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) struct device_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) const u32 *intspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) unsigned int intsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) unsigned long *out_hwirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) unsigned int *out_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) unsigned int ciu, bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct octeon_irq_ciu_domain_data *dd = d->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) ciu = intspec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) bit = intspec[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (ciu >= dd->num_sum || bit > 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) *out_hwirq = (ciu << 6) | bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) *out_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static struct irq_chip *octeon_irq_ciu_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static struct irq_chip *octeon_irq_ciu_chip_edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static struct irq_chip *octeon_irq_gpio_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
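/*
 * Map a CIU hwirq onto a virq.  Line 2 sources get the SUM2 chips;
 * lines 0 and 1 use the chip variants selected at init time, with edge
 * sources getting the edge chip and handle_edge_irq.
 */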
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static int octeon_irq_ciu_map(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) unsigned int virq, irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) unsigned int line = hw >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) unsigned int bit = hw & 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct octeon_irq_ciu_domain_data *dd = d->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (line == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (octeon_irq_ciu_is_edge(line, bit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) &octeon_irq_chip_ciu_sum2_edge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) handle_edge_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) &octeon_irq_chip_ciu_sum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) handle_level_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (octeon_irq_ciu_is_edge(line, bit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) octeon_irq_ciu_chip_edge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) handle_edge_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) octeon_irq_ciu_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) handle_level_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
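/*
 * GPIO hwirqs are plain pin numbers; add the domain's base_hwirq to
 * locate the underlying CIU line and bit before installing the mapping.
 */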
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static int octeon_irq_gpio_map(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) unsigned int virq, irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) unsigned int line, bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) line = (hw + gpiod->base_hwirq) >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) bit = (hw + gpiod->base_hwirq) & 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) octeon_irq_ciu_to_irq[line][bit] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * Default to handle_level_irq. If the DT specifies a different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * trigger type, the irq_set_type callback is invoked and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * handler is updated accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) octeon_irq_gpio_chip, handle_level_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) .map = octeon_irq_ciu_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) .unmap = octeon_irq_free_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) .xlate = octeon_irq_ciu_xlat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) .map = octeon_irq_gpio_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) .unmap = octeon_irq_free_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) .xlate = octeon_irq_gpio_xlat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
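/*
 * Dispatch for the IP2/IP3 CIU lines: read the SUM register, mask it
 * with this CPU's enable mirror and hand the highest pending bit to
 * do_IRQ(), or count a spurious interrupt if nothing is pending or
 * mapped.
 */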
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static void octeon_irq_ip2_ciu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) const unsigned long core_id = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (likely(ciu_sum)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) int bit = fls64(ciu_sum) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) int irq = octeon_irq_ciu_to_irq[0][bit];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (likely(irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) do_IRQ(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) static void octeon_irq_ip3_ciu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (likely(ciu_sum)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) int bit = fls64(ciu_sum) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) int irq = octeon_irq_ciu_to_irq[1][bit];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (likely(irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) do_IRQ(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static void octeon_irq_ip4_ciu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) int coreid = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) ciu_sum &= ciu_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (likely(ciu_sum)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) int bit = fls64(ciu_sum) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) int irq = octeon_irq_ciu_to_irq[2][bit];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (likely(irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) do_IRQ(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static bool octeon_irq_use_ip4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static void octeon_irq_local_enable_ip4(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) set_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static void octeon_irq_ip4_mask(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) clear_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static void (*octeon_irq_ip2)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) static void (*octeon_irq_ip3)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static void (*octeon_irq_ip4)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) void (*octeon_irq_setup_secondary)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
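/*
 * Install an externally supplied IP4 handler and unmask the IP4
 * interrupt line on every CPU.
 */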
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) octeon_irq_ip4 = h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) octeon_irq_use_ip4 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) static void octeon_irq_percpu_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) irq_cpu_online();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) static void octeon_irq_init_ciu_percpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) int coreid = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) __this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) __this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * Disable All CIU Interrupts. The ones we need will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * enabled later. Read the SUM register so we know the write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static void octeon_irq_init_ciu2_percpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) u64 regx, ipx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) int coreid = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * Disable All CIU2 Interrupts. The ones we need will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * enabled later. Read the SUM register so we know the write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * There are 9 registers and 3 IPX levels with strides 0x1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * and 0x200 respectively. Use loops to clear them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) for (regx = 0; regx <= 0x8000; regx += 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) for (ipx = 0; ipx <= 0x400; ipx += 0x200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) cvmx_write_csr(base + regx + ipx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static void octeon_irq_setup_secondary_ciu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) octeon_irq_init_ciu_percpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) octeon_irq_percpu_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /* Enable the CIU lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) set_c0_status(STATUSF_IP3 | STATUSF_IP2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (octeon_irq_use_ip4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) set_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) clear_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) static void octeon_irq_setup_secondary_ciu2(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) octeon_irq_init_ciu2_percpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) octeon_irq_percpu_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /* Enable the CIU lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) set_c0_status(STATUSF_IP3 | STATUSF_IP2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (octeon_irq_use_ip4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) set_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) clear_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
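/*
 * Probe the CIU node: pick the register-level chip variants, initialize
 * the per-CPU state and IP2/IP3(/IP4) dispatch hooks, create the irq
 * domain and pre-map the fixed legacy interrupt numbers.
 */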
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) static int __init octeon_irq_init_ciu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) struct device_node *ciu_node, struct device_node *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) unsigned int i, r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) struct irq_chip *chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct irq_chip *chip_edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct irq_chip *chip_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) struct irq_chip *chip_wd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct irq_domain *ciu_domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct octeon_irq_ciu_domain_data *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) dd = kzalloc(sizeof(*dd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (!dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) octeon_irq_init_ciu_percpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) octeon_irq_ip2 = octeon_irq_ip2_ciu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) octeon_irq_ip3 = octeon_irq_ip3_ciu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) && !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) octeon_irq_ip4 = octeon_irq_ip4_ciu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) dd->num_sum = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) octeon_irq_use_ip4 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) octeon_irq_ip4 = octeon_irq_ip4_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) dd->num_sum = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) octeon_irq_use_ip4 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) chip = &octeon_irq_chip_ciu_v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) chip_edge = &octeon_irq_chip_ciu_v2_edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) chip_wd = &octeon_irq_chip_ciu_wd_v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) chip = &octeon_irq_chip_ciu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) chip_edge = &octeon_irq_chip_ciu_edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) chip_mbox = &octeon_irq_chip_ciu_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) chip_wd = &octeon_irq_chip_ciu_wd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) octeon_irq_ciu_chip = chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) octeon_irq_ciu_chip_edge = chip_edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /* Mips internal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) octeon_irq_init_core();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) ciu_domain = irq_domain_add_tree(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) ciu_node, &octeon_irq_domain_ciu_ops, dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) irq_set_default_host(ciu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /* CIU_0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) for (i = 0; i < 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) r = octeon_irq_force_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) r = octeon_irq_set_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) r = octeon_irq_set_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) r = octeon_irq_force_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) r = octeon_irq_force_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) r = octeon_irq_force_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) /* CIU_1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) for (i = 0; i < 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) r = octeon_irq_set_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) handle_level_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) /* Enable the CIU lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) set_c0_status(STATUSF_IP3 | STATUSF_IP2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (octeon_irq_use_ip4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) set_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) clear_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
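/*
 * Probe the GPIO interrupt controller node.  The parent's
 * "#interrupt-cells" value determines how this node's "interrupts"
 * property encodes the base hwirq within the CIU line/bit space.
 */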
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) static int __init octeon_irq_init_gpio(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct device_node *gpio_node, struct device_node *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) struct octeon_irq_gpio_domain_data *gpiod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) u32 interrupt_cells;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) unsigned int base_hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (interrupt_cells == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) u32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) pr_warn("No \"interrupts\" property.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) base_hwirq = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) } else if (interrupt_cells == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) u32 v0, v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) pr_warn("No \"interrupts\" property.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) pr_warn("No \"interrupts\" property.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) base_hwirq = (v0 << 6) | v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) pr_warn("Bad \"#interrupt-cells\" property: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) interrupt_cells);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (gpiod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) /* gpio domain host_data is the base hwirq number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) gpiod->base_hwirq = base_hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) irq_domain_add_linear(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * Clear the OF_POPULATED flag that was set by of_irq_init()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * so that all GPIO devices will be probed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) of_node_clear_flag(gpio_node, OF_POPULATED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) u64 en_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) int coreid = data->irq - OCTEON_IRQ_WDOG0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) mask = 1ull << (cd->bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) (0x1000ull * cd->line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) cvmx_write_csr(en_addr, mask);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
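/*
 * Enable the source on the core chosen by next_cpu_for_irq() by setting
 * its bit in that core's W1S enable register.
 */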
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) static void octeon_irq_ciu2_enable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) u64 en_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) int cpu = next_cpu_for_irq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) int coreid = octeon_coreid_for_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) mask = 1ull << (cd->bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) (0x1000ull * cd->line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) cvmx_write_csr(en_addr, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
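/*
 * Enable the source on the current core only; used to unmask the
 * watchdog interrupts, which are wired to a single core.
 */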
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static void octeon_irq_ciu2_enable_local(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) u64 en_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) int coreid = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) mask = 1ull << (cd->bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) (0x1000ull * cd->line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) cvmx_write_csr(en_addr, mask);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
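/* Clear the enable bit on the current core; used as the irq_mask handler. */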
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) static void octeon_irq_ciu2_disable_local(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) u64 en_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) int coreid = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) mask = 1ull << (cd->bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) (0x1000ull * cd->line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) cvmx_write_csr(en_addr, mask);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
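/*
 * Acknowledge an edge interrupt by writing its bit to the RAW register
 * of the current core, which clears the latched state.
 */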
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) static void octeon_irq_ciu2_ack(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) u64 en_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) int coreid = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) mask = 1ull << (cd->bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) cvmx_write_csr(en_addr, mask);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
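/* Clear the enable bit for this source on every online core. */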
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) static void octeon_irq_ciu2_disable_all(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) mask = 1ull << (cd->bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) cvmx_write_csr(en_addr, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
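/*
 * The mailbox sources are per-core: irq_enable/irq_disable set or clear
 * the mailbox bit on every online core, while the ack/eoi handlers below
 * only touch the core that is running them.
 */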
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) octeon_coreid_for_cpu(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) cvmx_write_csr(en_addr, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) octeon_coreid_for_cpu(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) cvmx_write_csr(en_addr, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) u64 en_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) int coreid = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) cvmx_write_csr(en_addr, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) u64 en_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) int coreid = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) cvmx_write_csr(en_addr, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) #ifdef CONFIG_SMP
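/*
 * Steer the interrupt by enabling it on exactly one core of the new mask
 * and disabling it on all the others.  Nothing to do if the interrupt is
 * currently masked or disabled.
 */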
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) const struct cpumask *dest, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (!enable_one)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) mask = 1ull << cd->bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) u64 en_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (cpumask_test_cpu(cpu, dest) && enable_one) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) enable_one = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) octeon_coreid_for_cpu(cpu)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) (0x1000ull * cd->line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) octeon_coreid_for_cpu(cpu)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) (0x1000ull * cd->line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) cvmx_write_csr(en_addr, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) octeon_irq_gpio_setup(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) octeon_irq_ciu2_enable(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) octeon_irq_ciu2_disable_all(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) static struct irq_chip octeon_irq_chip_ciu2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) .name = "CIU2-E",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) .irq_enable = octeon_irq_ciu2_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) .irq_disable = octeon_irq_ciu2_disable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) .irq_mask = octeon_irq_ciu2_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) .irq_unmask = octeon_irq_ciu2_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) .irq_set_affinity = octeon_irq_ciu2_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) static struct irq_chip octeon_irq_chip_ciu2_edge = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) .name = "CIU2-E",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) .irq_enable = octeon_irq_ciu2_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) .irq_disable = octeon_irq_ciu2_disable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) .irq_ack = octeon_irq_ciu2_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) .irq_mask = octeon_irq_ciu2_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) .irq_unmask = octeon_irq_ciu2_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) .irq_set_affinity = octeon_irq_ciu2_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) static struct irq_chip octeon_irq_chip_ciu2_mbox = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) .name = "CIU2-M",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) .irq_enable = octeon_irq_ciu2_mbox_enable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) .irq_disable = octeon_irq_ciu2_mbox_disable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) .irq_ack = octeon_irq_ciu2_mbox_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) .irq_eoi = octeon_irq_ciu2_mbox_enable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) .irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) .irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) .flags = IRQCHIP_ONOFFLINE_ENABLED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) static struct irq_chip octeon_irq_chip_ciu2_wd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) .name = "CIU2-W",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) .irq_enable = octeon_irq_ciu2_wd_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) .irq_disable = octeon_irq_ciu2_disable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) .irq_mask = octeon_irq_ciu2_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) .irq_unmask = octeon_irq_ciu2_enable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) static struct irq_chip octeon_irq_chip_ciu2_gpio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) .name = "CIU-GPIO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) .irq_enable = octeon_irq_ciu2_enable_gpio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) .irq_disable = octeon_irq_ciu2_disable_gpio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) .irq_ack = octeon_irq_ciu_gpio_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) .irq_mask = octeon_irq_ciu2_disable_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) .irq_unmask = octeon_irq_ciu2_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) .irq_set_type = octeon_irq_ciu_gpio_set_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) .irq_set_affinity = octeon_irq_ciu2_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) .flags = IRQCHIP_SET_TYPE_MASKED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
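/*
 * The two cells of a device tree interrupt specifier are <ciu-line bit>;
 * they are packed into a single hwirq as (line << 6) | bit.
 */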
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) static int octeon_irq_ciu2_xlat(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) struct device_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) const u32 *intspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) unsigned int intsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) unsigned long *out_hwirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) unsigned int *out_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) unsigned int ciu, bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) ciu = intspec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) bit = intspec[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) *out_hwirq = (ciu << 6) | bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) *out_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
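/*
 * Return true for the handful of CIU2 sources that are edge sensitive:
 * the IPD_DRP, timer and PTP bits on the MIO line and the DRP bits on
 * the PKT line.
 */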
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) bool edge = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (line == 3) /* MIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) switch (bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) case 2: /* IPD_DRP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) case 8 ... 11: /* Timers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) case 48: /* PTP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) edge = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) else if (line == 6) /* PKT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) switch (bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) case 52 ... 53: /* ILK_DRP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) case 8 ... 12: /* GMX_DRP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) edge = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) return edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
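/*
 * Decode the hwirq back into (line, bit) and install either the edge or
 * the level chip for it.
 */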
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) static int octeon_irq_ciu2_map(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) unsigned int virq, irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) unsigned int line = hw >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) unsigned int bit = hw & 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
	/*
	 * Don't map the irq if it is reserved for GPIO.
	 * (The GPIO lines are on line 7.)
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (line == 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (octeon_irq_ciu2_is_edge(line, bit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) octeon_irq_set_ciu_mapping(virq, line, bit, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) &octeon_irq_chip_ciu2_edge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) handle_edge_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) octeon_irq_set_ciu_mapping(virq, line, bit, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) &octeon_irq_chip_ciu2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) handle_level_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) .map = octeon_irq_ciu2_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) .unmap = octeon_irq_free_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) .xlate = octeon_irq_ciu2_xlat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
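/*
 * IP2 dispatch: find the highest pending line in the SUM register, then
 * the highest pending bit in that line's SRC register, and hand the
 * mapped Linux irq to do_IRQ().
 */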
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) static void octeon_irq_ciu2(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) int line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) u64 src_reg, src, sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) const unsigned long core_id = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (unlikely(!sum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) goto spurious;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) line = fls64(sum) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) src = cvmx_read_csr(src_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) if (unlikely(!src))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) goto spurious;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) bit = fls64(src) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) irq = octeon_irq_ciu_to_irq[line][bit];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (unlikely(!irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) goto spurious;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) do_IRQ(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) spurious:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) if (OCTEON_IS_MODEL(OCTEON_CN68XX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
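/*
 * IP3 dispatch: the four mailbox bits sit in the top four bits of the
 * IP3 SUM register.
 */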
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) static void octeon_irq_ciu2_mbox(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) int line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) const unsigned long core_id = cvmx_get_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (unlikely(!sum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) goto spurious;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) line = fls64(sum) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) do_IRQ(OCTEON_IRQ_MBOX0 + line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) spurious:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (OCTEON_IS_MODEL(OCTEON_CN68XX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
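/*
 * Probe the CIU2: install the per-CPU state and the IP2/IP3 dispatch
 * handlers, create the irq domain, and force the fixed mappings for the
 * workqueue, watchdog, timer, PCI and mailbox interrupts.
 */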
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) static int __init octeon_irq_init_ciu2(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) struct device_node *ciu_node, struct device_node *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) {
	unsigned int i;
	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) struct irq_domain *ciu_domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) octeon_irq_init_ciu2_percpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) octeon_irq_ip2 = octeon_irq_ciu2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) octeon_irq_ip3 = octeon_irq_ciu2_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) octeon_irq_ip4 = octeon_irq_ip4_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
	/* MIPS internal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) octeon_irq_init_core();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) ciu_domain = irq_domain_add_tree(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) irq_set_default_host(ciu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
	/* CIU2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) for (i = 0; i < 64; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) r = octeon_irq_force_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) for (i = 0; i < 32; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) &octeon_irq_chip_ciu2_wd, handle_level_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) r = octeon_irq_force_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) r = octeon_irq_force_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) r = octeon_irq_force_ciu_mapping(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) /* Enable the CIU lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) set_c0_status(STATUSF_IP3 | STATUSF_IP2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) clear_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
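/*
 * Per-CIB-block state: the addresses of its RAW and EN registers, the
 * number of implemented bits, and a lock that serializes the
 * read-modify-write updates of EN.
 */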
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) struct octeon_irq_cib_host_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) raw_spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) u64 raw_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) u64 en_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) int max_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) struct octeon_irq_cib_chip_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) struct octeon_irq_cib_host_data *host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
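/*
 * Enabling and disabling a CIB source is a read-modify-write of the
 * shared EN register, serialized by host_data->lock.
 */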
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) static void octeon_irq_cib_enable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) u64 en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) struct octeon_irq_cib_host_data *host_data = cd->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) raw_spin_lock_irqsave(&host_data->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) en = cvmx_read_csr(host_data->en_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) en |= 1ull << cd->bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) cvmx_write_csr(host_data->en_reg, en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) raw_spin_unlock_irqrestore(&host_data->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) static void octeon_irq_cib_disable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) u64 en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) struct octeon_irq_cib_host_data *host_data = cd->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) raw_spin_lock_irqsave(&host_data->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) en = cvmx_read_csr(host_data->en_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) en &= ~(1ull << cd->bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) cvmx_write_csr(host_data->en_reg, en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) raw_spin_unlock_irqrestore(&host_data->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) irqd_set_trigger_type(data, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) return IRQ_SET_MASK_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) static struct irq_chip octeon_irq_chip_cib = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) .name = "CIB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) .irq_enable = octeon_irq_cib_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) .irq_disable = octeon_irq_cib_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) .irq_mask = octeon_irq_cib_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) .irq_unmask = octeon_irq_cib_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) .irq_set_type = octeon_irq_cib_set_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) static int octeon_irq_cib_xlat(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) struct device_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) const u32 *intspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) unsigned int intsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) unsigned long *out_hwirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) unsigned int *out_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) unsigned int type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (intsize == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) type = intspec[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) case 0: /* unofficial value, but we might as well let it work. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) case 4: /* official value for level triggering. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) *out_type = IRQ_TYPE_LEVEL_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) case 1: /* official value for edge triggering. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) *out_type = IRQ_TYPE_EDGE_RISING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) default: /* Nothing else is acceptable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) *out_hwirq = intspec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) static int octeon_irq_cib_map(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) unsigned int virq, irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) struct octeon_irq_cib_host_data *host_data = d->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) struct octeon_irq_cib_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (hw >= host_data->max_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) pr_err("ERROR: %s mapping %u is too big!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) irq_domain_get_of_node(d)->name, (unsigned)hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) cd = kzalloc(sizeof(*cd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (!cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) cd->host_data = host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) cd->bit = hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) handle_simple_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) irq_set_chip_data(virq, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) static struct irq_domain_ops octeon_irq_domain_cib_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) .map = octeon_irq_cib_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) .unmap = octeon_irq_free_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) .xlate = octeon_irq_cib_xlat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
/* Demultiplex the CIB summary and chain to the real per-bit handlers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) u64 en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) u64 raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) u64 bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) struct irq_domain *cib_domain = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) en = cvmx_read_csr(host_data->en_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) raw = cvmx_read_csr(host_data->raw_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) bits = en & raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) for (i = 0; i < host_data->max_bits; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) if ((bits & 1ull << i) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) irq = irq_find_mapping(cib_domain, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (!irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) i, host_data->raw_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) raw_spin_lock_irqsave(&host_data->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) en = cvmx_read_csr(host_data->en_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) en &= ~(1ull << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) cvmx_write_csr(host_data->en_reg, en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) cvmx_write_csr(host_data->raw_reg, 1ull << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) raw_spin_unlock_irqrestore(&host_data->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) struct irq_desc *desc = irq_to_desc(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) struct irq_data *irq_data = irq_desc_get_irq_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) /* If edge, acknowledge the bit we will be sending. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (irqd_get_trigger_type(irq_data) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) IRQ_TYPE_EDGE_BOTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) cvmx_write_csr(host_data->raw_reg, 1ull << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) generic_handle_irq_desc(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
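/*
 * Probe one CIB block from the device tree: reg(0) is the RAW register,
 * reg(1) the EN register, and "cavium,max-bits" gives the number of
 * implemented sources.  All sources start disabled and acknowledged, and
 * the block is chained onto its parent interrupt.
 */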
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) static int __init octeon_irq_init_cib(struct device_node *ciu_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) struct device_node *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) const __be32 *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) struct octeon_irq_cib_host_data *host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) int parent_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) struct irq_domain *cib_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) parent_irq = irq_of_parse_and_map(ciu_node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (!parent_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) pr_err("ERROR: Couldn't acquire parent_irq for %pOFn\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) ciu_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (!host_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) raw_spin_lock_init(&host_data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) addr = of_get_address(ciu_node, 0, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) pr_err("ERROR: Couldn't acquire reg(0) %pOFn\n", ciu_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) host_data->raw_reg = (u64)phys_to_virt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) of_translate_address(ciu_node, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) addr = of_get_address(ciu_node, 1, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) pr_err("ERROR: Couldn't acquire reg(1) %pOFn\n", ciu_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) host_data->en_reg = (u64)phys_to_virt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) of_translate_address(ciu_node, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) pr_err("ERROR: Couldn't read cavium,max-bits from %pOFn\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) ciu_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) host_data->max_bits = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) &octeon_irq_domain_cib_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) if (!cib_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) r = request_irq(parent_irq, octeon_irq_cib_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) IRQF_NO_THREAD, "cib", cib_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) pr_err("request_irq cib failed %d\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) pr_info("CIB interrupt controller probed: %llx %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) host_data->raw_reg, host_data->max_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
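/*
 * A CIU3 interrupt specifier is <intsn trigger-type>, with a 20-bit
 * intsn.  Reject intsns in major block 0x04 (software, handled
 * separately) and sources whose ISC_CTL "imp" bit says they are not
 * implemented.
 */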
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) int octeon_irq_ciu3_xlat(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) struct device_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) const u32 *intspec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) unsigned int intsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) unsigned long *out_hwirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) unsigned int *out_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) struct octeon_ciu3_info *ciu3_info = d->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) unsigned int hwirq, type, intsn_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) union cvmx_ciu3_iscx_ctl isc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) if (intsize < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) hwirq = intspec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) type = intspec[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) if (hwirq >= (1 << 20))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) intsn_major = hwirq >> 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) switch (intsn_major) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) case 0x04: /* Software handled separately. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (!isc.s.imp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) case 4: /* official value for level triggering. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) *out_type = IRQ_TYPE_LEVEL_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) case 0: /* unofficial value, but we might as well let it work. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) case 1: /* official value for edge triggering. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) *out_type = IRQ_TYPE_EDGE_RISING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) default: /* Nothing else is acceptable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) *out_hwirq = hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
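/*
 * Clear any stale enable through the W1C register, then program ISC_CTL
 * with the enable bit and the IDT of the core chosen by
 * next_cpu_for_irq().  The read back ensures the write has completed.
 */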
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) void octeon_irq_ciu3_enable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) union cvmx_ciu3_iscx_ctl isc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) union cvmx_ciu3_iscx_w1c isc_w1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) u64 isc_ctl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) cpu = next_cpu_for_irq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) isc_w1c.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) isc_w1c.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) isc_ctl.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) isc_ctl.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) cvmx_read_csr(isc_ctl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
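/* Clear the enable bit via the W1C register, then clear ISC_CTL entirely. */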
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) void octeon_irq_ciu3_disable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) u64 isc_ctl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) union cvmx_ciu3_iscx_w1c isc_w1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) isc_w1c.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) isc_w1c.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) cvmx_write_csr(isc_ctl_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) cvmx_read_csr(isc_ctl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) void octeon_irq_ciu3_ack(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) u64 isc_w1c_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) union cvmx_ciu3_iscx_w1c isc_w1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) u32 trigger_type = irqd_get_trigger_type(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
	/*
	 * We use a single irq_chip, so there is nothing to do to ack a
	 * level interrupt.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) isc_w1c.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) isc_w1c.s.raw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) cvmx_read_csr(isc_w1c_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
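/* Mask a CIU3 source by clearing only its enable bit via W1C. */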
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) void octeon_irq_ciu3_mask(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) union cvmx_ciu3_iscx_w1c isc_w1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) u64 isc_w1c_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) isc_w1c.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) isc_w1c.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) cvmx_read_csr(isc_w1c_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
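/*
 * Mask and ack in a single W1C write: the enable bit is always cleared,
 * and the RAW bit as well when the source is edge-triggered.
 */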
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) void octeon_irq_ciu3_mask_ack(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) union cvmx_ciu3_iscx_w1c isc_w1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) u64 isc_w1c_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) struct octeon_ciu_chip_data *cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) u32 trigger_type = irqd_get_trigger_type(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) isc_w1c.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) isc_w1c.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) * We use a single irq_chip, so only ack an edge (!level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) * interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) if (trigger_type & IRQ_TYPE_EDGE_BOTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) isc_w1c.s.raw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) cvmx_read_csr(isc_w1c_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) #ifdef CONFIG_SMP
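/*
 * Move a source to the first CPU of the requested mask: tear down the
 * current enable, then re-enable the source with that CPU's IP2 IDT.
 * Masks that leave the node owning this CIU3 are rejected.
 */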
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) static int octeon_irq_ciu3_set_affinity(struct irq_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) const struct cpumask *dest, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) union cvmx_ciu3_iscx_ctl isc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) union cvmx_ciu3_iscx_w1c isc_w1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) u64 isc_ctl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) if (!enable_one)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) return IRQ_SET_MASK_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) cpu = cpumask_first(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (cpu >= nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) cd->current_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) isc_w1c.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) isc_w1c.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) isc_ctl.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) isc_ctl.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) cvmx_read_csr(isc_ctl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) return IRQ_SET_MASK_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) static struct irq_chip octeon_irq_chip_ciu3 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) .name = "CIU3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) .irq_startup = edge_startup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) .irq_enable = octeon_irq_ciu3_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) .irq_disable = octeon_irq_ciu3_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) .irq_ack = octeon_irq_ciu3_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) .irq_mask = octeon_irq_ciu3_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) .irq_mask_ack = octeon_irq_ciu3_mask_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) .irq_unmask = octeon_irq_ciu3_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) .irq_set_type = octeon_irq_ciu_set_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) .irq_set_affinity = octeon_irq_ciu3_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)
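/*
 * Common mapping helper for CIU3 domains: allocate the per-interrupt
 * chip data on the CIU3's node, record the intsn and CSR base, and
 * install the requested irq_chip with the edge flow handler.
 */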
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) irq_hw_number_t hw, struct irq_chip *chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) struct octeon_ciu3_info *ciu3_info = d->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) ciu3_info->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) if (!cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) cd->intsn = hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) cd->current_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) cd->ciu3_addr = ciu3_info->ciu3_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) cd->ciu_node = ciu3_info->node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) irq_set_chip_and_handler(virq, chip, handle_edge_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) irq_set_chip_data(virq, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) static int octeon_irq_ciu3_map(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) unsigned int virq, irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
static const struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) .map = octeon_irq_ciu3_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) .unmap = octeon_irq_free_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) .xlate = octeon_irq_ciu3_xlat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
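/*
 * IP2 dispatch.  CIU3_DEST_PP_INT(3 * core) reports a pending intsn for
 * this core's IP2 line; the intsn's major block (intsn >> 12) selects
 * the irq_domain to dispatch into.  If no handler claims the interrupt,
 * the source is disabled and it is counted as spurious.
 */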
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static void octeon_irq_ciu3_ip2(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) union cvmx_ciu3_destx_pp_int dest_pp_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) struct octeon_ciu3_info *ciu3_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) u64 ciu3_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) ciu3_info = __this_cpu_read(octeon_ciu3_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) ciu3_addr = ciu3_info->ciu3_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) if (likely(dest_pp_int.s.intr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) irq_hw_number_t intsn = dest_pp_int.s.intsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) irq_hw_number_t hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) struct irq_domain *domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) /* Get the domain to use from the major block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) int block = intsn >> 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) domain = ciu3_info->domain[block];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if (ciu3_info->intsn2hw[block])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) hw = ciu3_info->intsn2hw[block](domain, intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) hw = intsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) ret = handle_domain_irq(domain, hw, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) union cvmx_ciu3_iscx_w1c isc_w1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) isc_w1c.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) isc_w1c.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) cvmx_read_csr(isc_w1c_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
/*
 * There are 10 mailbox interrupts per core, numbered from zero.
 * The base mbox for a core is core * 10.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) {
	/* SW (mbox) interrupts are major block 0x04 (intsn bits 12..19) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) return 0x04000 + CIU3_MBOX_PER_CORE * core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
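/*
 * IP3 (mailbox) dispatch.  The destination register at index
 * 1 + 3 * core covers this core's IP3 line; an intsn inside this core's
 * mailbox range maps directly to OCTEON_IRQ_MBOX0 + mbox, anything else
 * is cleared and reported as spurious.
 */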
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) static void octeon_irq_ciu3_mbox(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) union cvmx_ciu3_destx_pp_int dest_pp_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) struct octeon_ciu3_info *ciu3_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) u64 ciu3_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) int core = cvmx_get_local_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) ciu3_info = __this_cpu_read(octeon_ciu3_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) ciu3_addr = ciu3_info->ciu3_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) if (likely(dest_pp_int.s.intr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) irq_hw_number_t intsn = dest_pp_int.s.intsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) do_IRQ(mbox + OCTEON_IRQ_MBOX0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) union cvmx_ciu3_iscx_w1c isc_w1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) isc_w1c.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) isc_w1c.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) cvmx_read_csr(isc_w1c_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) spurious_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
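/*
 * Raise mailbox 'mbox' on 'cpu' by setting the RAW bit of the target
 * CPU's mailbox intsn through the write-1-to-set register; the target
 * core then takes an IP3 interrupt.  A (hypothetical) caller signalling
 * mailbox 0 on CPU 2 would simply do:
 *
 *	octeon_ciu3_mbox_send(2, 0);
 */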
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) struct octeon_ciu3_info *ciu3_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) unsigned int intsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) union cvmx_ciu3_iscx_w1s isc_w1s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) u64 isc_w1s_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) ciu3_info = per_cpu(octeon_ciu3_info, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) isc_w1s.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) isc_w1s.s.raw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) cvmx_read_csr(isc_w1s_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
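/*
 * Reprogram one CPU's copy of a mailbox source: clear its enable bit
 * and zero ISC_CTL first, then, when enabling, write the enable bit
 * together with that CPU's IP3 IDT so the mailbox is delivered on IP3.
 */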
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) struct octeon_ciu3_info *ciu3_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) unsigned int intsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) u64 isc_ctl_addr, isc_w1c_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) union cvmx_ciu3_iscx_ctl isc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) ciu3_info = per_cpu(octeon_ciu3_info, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) isc_ctl.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) isc_ctl.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) cvmx_write_csr(isc_ctl_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) if (en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) isc_ctl.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) isc_ctl.s.en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) isc_ctl.s.idt = idt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) cvmx_read_csr(isc_ctl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) for_each_online_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) for_each_online_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) struct octeon_ciu3_info *ciu3_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) unsigned int intsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) u64 isc_w1c_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) union cvmx_ciu3_iscx_w1c isc_w1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) isc_w1c.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) isc_w1c.s.raw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) ciu3_info = __this_cpu_read(octeon_ciu3_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) cvmx_read_csr(isc_w1c_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
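/*
 * Per-CPU CIU3 bring-up: publish this CPU's ciu3_info, claim four IDT
 * entries (IP2, IP3, IP4 and a spare), route the IP2/IP3 entries to
 * this core only, leave IP4 and the spare unrouted, and reset all of
 * the core's mailbox sources so they start out disabled.
 */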
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) u64 b = ciu3_info->ciu3_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) int idt_ip2, idt_ip3, idt_ip4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) int unused_idt2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) int core = cvmx_get_local_core_num();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) __this_cpu_write(octeon_ciu3_info, ciu3_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
	/*
	 * There are 4 IDT entries per core, starting from 1 because entry
	 * zero is reserved.  The base IDT for a core is 4 * core + 1.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) idt_ip2 = core * 4 + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) idt_ip3 = core * 4 + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) idt_ip4 = core * 4 + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) unused_idt2 = core * 4 + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) __this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) __this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) /* ip2 interrupts for this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) /* ip3 interrupts for this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) /* ip4 interrupts for this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
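/*
 * Secondary-CPU entry point: repeat the per-CPU IDT/mailbox setup for
 * this node's CIU3, bring the per-CPU irqs online, and unmask IP2/IP3
 * (and IP4 when it is in use) in the CP0 status register.
 */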
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) static void octeon_irq_setup_secondary_ciu3(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) struct octeon_ciu3_info *ciu3_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) octeon_irq_ciu3_alloc_resources(ciu3_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) irq_cpu_online();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) /* Enable the CIU lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) set_c0_status(STATUSF_IP3 | STATUSF_IP2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) if (octeon_irq_use_ip4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) set_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) clear_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) static struct irq_chip octeon_irq_chip_ciu3_mbox = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) .name = "CIU3-M",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) .irq_enable = octeon_irq_ciu3_mbox_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) .irq_disable = octeon_irq_ciu3_mbox_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) .irq_ack = octeon_irq_ciu3_mbox_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) .irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) .irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) .flags = IRQCHIP_ONOFFLINE_ENABLED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
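/*
 * Probe one CIU3 from the device tree: compute its virtual CSR base,
 * point every major block at a default tree irq_domain, wire up the
 * mailbox irqs and per-CPU state for the boot node, and switch the
 * low-level IP2/IP3 dispatch over to the CIU3 handlers above.
 */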
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) struct device_node *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) int node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) struct irq_domain *domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) struct octeon_ciu3_info *ciu3_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) const __be32 *zero_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) u64 base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) union cvmx_ciu3_const consts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) node = 0; /* of_node_to_nid(ciu_node); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) if (!ciu3_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
	zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (WARN_ON(!zero_addr)) {
		kfree(ciu3_info);
		return -EINVAL;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) base_addr = of_translate_address(ciu_node, zero_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) base_addr = (u64)phys_to_virt(base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) ciu3_info->ciu3_addr = base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) ciu3_info->node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) octeon_irq_ip2 = octeon_irq_ciu3_ip2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) octeon_irq_ip3 = octeon_irq_ciu3_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) octeon_irq_ip4 = octeon_irq_ip4_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (node == cvmx_get_node_num()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) /* Mips internal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) octeon_irq_init_core();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) /* Only do per CPU things if it is the CIU of the boot node. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) WARN_ON(i < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) for (i = 0; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) &octeon_irq_chip_ciu3_mbox, handle_percpu_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) * Initialize all domains to use the default domain. Specific major
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) * blocks will overwrite the default domain as needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) ciu3_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) for (i = 0; i < MAX_CIU3_DOMAINS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) ciu3_info->domain[i] = domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) octeon_ciu3_info_per_node[node] = ciu3_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) if (node == cvmx_get_node_num()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) /* Only do per CPU things if it is the CIU of the boot node. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) octeon_irq_ciu3_alloc_resources(ciu3_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) if (node == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) irq_set_default_host(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) octeon_irq_use_ip4 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) /* Enable the CIU lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) set_c0_status(STATUSF_IP2 | STATUSF_IP3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) clear_c0_status(STATUSF_IP4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
static const struct of_device_id ciu_types[] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) {.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) void __init arch_init_irq(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) /* Set the default affinity to the boot cpu. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) cpumask_clear(irq_default_affinity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) of_irq_init(ciu_types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
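/*
 * Top-level MIPS dispatch: loop over the pending Cause/Status IM bits,
 * handing IP2/IP3/IP4 to the registered handlers and any remaining CPU
 * interrupt lines straight to do_IRQ(), until nothing is left pending.
 */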
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) asmlinkage void plat_irq_dispatch(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) unsigned long cop0_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) unsigned long cop0_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) cop0_cause = read_c0_cause();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) cop0_status = read_c0_status();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) cop0_cause &= cop0_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) cop0_cause &= ST0_IM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) if (cop0_cause & STATUSF_IP2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) octeon_irq_ip2();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) else if (cop0_cause & STATUSF_IP3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) octeon_irq_ip3();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) else if (cop0_cause & STATUSF_IP4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) octeon_irq_ip4();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) else if (cop0_cause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) void octeon_fixup_irqs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) irq_cpu_offline();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) #endif /* CONFIG_HOTPLUG_CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
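/*
 * Return the irq_domain registered for a major block on the given node;
 * exported so other code can map intsns belonging to that block.
 */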
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) struct octeon_ciu3_info *ciu3_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) return ciu3_info->domain[block];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) EXPORT_SYMBOL(octeon_irq_get_block_domain);