Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

The listing below is the Samsung Exynos interrupt combiner irqchip driver (drivers/irqchip/exynos-combiner.c in the upstream tree) as carried in this kernel.
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

#define IRQ_IN_COMBINER		8
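
/*
 * Register layout, as implied by the arithmetic below: each combiner group
 * multiplexes IRQ_IN_COMBINER (8) input lines onto one parent interrupt.
 * Four groups share each 32-bit enable/status word (one byte per group),
 * and every block of four groups is spaced 0x10 apart in the register
 * window.
 */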

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int hwirq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
#ifdef CONFIG_PM
	u32 pm_save;
#endif
};

static struct combiner_chip_data *combiner_data;
static struct irq_domain *combiner_irq_domain;
static unsigned int max_nr = 20;

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

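/*
 * Chained handler for the parent interrupt of one combiner group: read the
 * raw status word, keep only this group's byte (irq_mask), translate the
 * lowest pending bit into a combiner hwirq and dispatch the mapped Linux
 * interrupt.  Bits with no mapping are reported through handle_bad_irq().
 */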
static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);

	if (unlikely(!cascade_irq))
		handle_bad_irq(desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

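/*
 * The combiner itself has no notion of CPU affinity: an affinity request on
 * a combined interrupt is simply forwarded to the parent interrupt that the
 * whole group is cascaded from.
 */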
#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};

static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}

static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts */
	writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

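/*
 * Device tree consumers use a two-cell specifier <group line>, where "group"
 * selects a combiner group and "line" is the input within that group.  The
 * linear hwirq is group * IRQ_IN_COMBINER + line; an illustrative
 * "interrupts = <2 4>;" would resolve to hwirq 20.  The .map() callback then
 * attaches the per-group chip data (hw >> 3 selects the group).
 */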
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (irq_domain_get_of_node(d) != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}

static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	irq_set_probe(irq);

	return 0;
}

static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq;
	unsigned int nr_irq;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
	if (!combiner_data) {
		pr_warn("%s: could not allocate combiner data\n", __func__);
		return;
	}

	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		irq = irq_of_parse_and_map(np, i);

		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}

#ifdef CONFIG_PM

/**
 * combiner_suspend - save interrupt combiner state before suspend
 *
 * Save the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state.
 *
 */
static int combiner_suspend(void)
{
	int i;

	for (i = 0; i < max_nr; i++)
		combiner_data[i].pm_save =
			readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);

	return 0;
}

/**
 * combiner_resume - restore interrupt combiner state after resume
 *
 * Restore the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state on suspend.
 *
 */
static void combiner_resume(void)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		writel_relaxed(combiner_data[i].irq_mask,
			     combiner_data[i].base + COMBINER_ENABLE_CLEAR);
		writel_relaxed(combiner_data[i].pm_save,
			     combiner_data[i].base + COMBINER_ENABLE_SET);
	}
}

#else
#define combiner_suspend	NULL
#define combiner_resume		NULL
#endif

static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};

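/*
 * Early init entry point, matched via IRQCHIP_DECLARE on the
 * "samsung,exynos4210-combiner" compatible.  The optional
 * "samsung,combiner-nr" property overrides the default group count
 * (max_nr, 20); devicetrees that omit it get the default, as logged below.
 */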
static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, "
			"setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);