Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3) // regmap based irq_chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) // Copyright 2011 Wolfson Microelectronics plc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/regmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include "internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) 
/*
 * Per-instance state for a regmap-backed interrupt controller.  One of
 * these is created for each registered regmap_irq_chip; all cached
 * mask/type/wake state is protected by @lock, which is taken in the
 * irq_bus_lock callback and released (after syncing to hardware) in
 * irq_bus_sync_unlock.
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises cached-state updates and hardware sync */
	struct irq_chip irq_chip;	/* per-instance copy of the template irq_chip */

	struct regmap *map;		/* register map used for all I/O */
	const struct regmap_irq_chip *chip;	/* static chip description */

	int irq_base;			/* base linux IRQ when not using a domain */
	struct irq_domain *domain;	/* hwirq -> virq mapping */

	int irq;			/* parent (upstream) interrupt */
	int wake_count;			/* pending wake-enable delta to push to parent */

	void *status_reg_buf;		/* raw buffer for bulk status reads (val_bytes sized) */
	unsigned int *main_status_buf;	/* cached main ("first level") status registers */
	unsigned int *status_buf;	/* cached per-register status values */
	unsigned int *mask_buf;		/* cached mask bits (1 = masked) */
	unsigned int *mask_buf_def;	/* all maskable bits per register */
	unsigned int *wake_buf;		/* cached wake-disable bits, if chip has wake_base */
	unsigned int *type_buf;		/* cached trigger-type bits */
	unsigned int *type_buf_def;	/* all type bits per register */

	unsigned int irq_reg_stride;	/* register spacing for status/mask/ack banks */
	unsigned int type_reg_stride;	/* register spacing for the type bank */

	bool clear_status:1;		/* status regs need a clearing read on next sync */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) static inline const
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 				     int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	return &data->chip->irqs[irq];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) static void regmap_irq_lock(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 	mutex_lock(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 				  unsigned int reg, unsigned int mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 				  unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	if (d->chip->mask_writeonly)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 		return regmap_write_bits(d->map, reg, mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 		return regmap_update_bits(d->map, reg, mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
/*
 * irq_bus_sync_unlock callback: push all cached mask, wake and type state
 * out to the hardware, ack anything that needs it, propagate wake-count
 * changes to the parent interrupt, then drop the bus lock taken in
 * regmap_irq_lock().
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;
	u32 val;

	/* Device must be resumed before any register I/O below. */
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * clear_on_unmask chips clear status by reading it; requested via
	 * d->clear_status from regmap_irq_enable().
	 */
	if (d->clear_status) {
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->chip->status_base +
				(i * map->reg_stride * d->irq_reg_stride);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* Inverted semantics: a 1 in the register enables. */
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		/* Sync wake enables, if the chip has a wake register bank. */
		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (d->chip->clear_ack) {
				/* clear_ack chips need the ack value written
				 * back out (inverted) to complete the ack.
				 */
				if (d->chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) static void regmap_irq_enable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	struct regmap *map = d->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	unsigned int mask, type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	 * The type_in_mask flag means that the underlying hardware uses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	 * separate mask bits for rising and falling edge interrupts, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	 * we want to make them into a single virtual interrupt with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	 * configurable edge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	 * If the interrupt we're enabling defines the falling or rising
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	 * masks then instead of using the regular mask bits for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	 * interrupt, use the value previously written to the type buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	 * at the corresponding offset in regmap_irq_set_type().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	if (d->chip->type_in_mask && type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 		mask = irq_data->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	if (d->chip->clear_on_unmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 		d->clear_status = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) static void regmap_irq_disable(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	struct regmap *map = d->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	struct regmap *map = d->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	const struct regmap_irq_type *t = &irq_data->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	if ((t->types_supported & type) != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	reg = t->type_reg_offset / map->reg_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	if (t->type_reg_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 		d->type_buf[reg] &= ~t->type_reg_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 		d->type_buf[reg] &= ~(t->type_falling_val |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 				      t->type_rising_val |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 				      t->type_level_low_val |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 				      t->type_level_high_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	case IRQ_TYPE_EDGE_FALLING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 		d->type_buf[reg] |= t->type_falling_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	case IRQ_TYPE_EDGE_RISING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 		d->type_buf[reg] |= t->type_rising_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	case IRQ_TYPE_EDGE_BOTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 		d->type_buf[reg] |= (t->type_falling_val |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 					t->type_rising_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	case IRQ_TYPE_LEVEL_HIGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 		d->type_buf[reg] |= t->type_level_high_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	case IRQ_TYPE_LEVEL_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 		d->type_buf[reg] |= t->type_level_low_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	struct regmap *map = d->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	if (on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 		if (d->wake_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 			d->wake_buf[irq_data->reg_offset / map->reg_stride]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 				&= ~irq_data->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 		d->wake_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 		if (d->wake_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 			d->wake_buf[irq_data->reg_offset / map->reg_stride]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 				|= irq_data->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 		d->wake_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 
/*
 * Template irq_chip for regmap-backed interrupt controllers.  Each
 * regmap_irq_chip_data embeds its own copy (see the irq_chip member of
 * that struct).  All operations only touch cached state under the bus
 * lock; regmap_irq_sync_unlock() writes the result to the hardware.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 					   unsigned int b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	const struct regmap_irq_chip *chip = data->chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	struct regmap *map = data->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	struct regmap_irq_sub_irq_map *subreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	if (!chip->sub_reg_offsets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 		/* Assume linear mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 		ret = regmap_read(map, chip->status_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 				  (b * map->reg_stride * data->irq_reg_stride),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 				   &data->status_buf[b]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 		subreg = &chip->sub_reg_offsets[b];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		for (i = 0; i < subreg->num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 			unsigned int offset = subreg->offset[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 			ret = regmap_read(map, chip->status_base + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 					  &data->status_buf[offset]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) static irqreturn_t regmap_irq_thread(int irq, void *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	struct regmap_irq_chip_data *data = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	const struct regmap_irq_chip *chip = data->chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	struct regmap *map = data->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	bool handled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	if (chip->handle_pre_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 		chip->handle_pre_irq(chip->irq_drv_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	if (chip->runtime_pm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 		ret = pm_runtime_get_sync(map->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	 * Read only registers with active IRQs if the chip has 'main status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	 * register'. Else read in the statuses, using a single bulk read if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	 * possible in order to reduce the I/O overheads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	if (chip->num_main_regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		unsigned int max_main_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 		unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 		size = chip->num_regs * sizeof(unsigned int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 		max_main_bits = (chip->num_main_status_bits) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 				 chip->num_main_status_bits : chip->num_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 		/* Clear the status buf as we don't read all status regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		memset(data->status_buf, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 		/* We could support bulk read for main status registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 		 * but I don't expect to see devices with really many main
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		 * status registers so let's only support single reads for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 		 * sake of simplicity. and add bulk reads only if needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		for (i = 0; i < chip->num_main_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 			ret = regmap_read(map, chip->main_status +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 				  (i * map->reg_stride
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 				   * data->irq_reg_stride),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 				  &data->main_status_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 				dev_err(map->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 					"Failed to read IRQ status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 					ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 				goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 		/* Read sub registers with active IRQs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		for (i = 0; i < chip->num_main_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 			unsigned int b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 			const unsigned long mreg = data->main_status_buf[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 				if (i * map->format.val_bytes * 8 + b >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 				    max_main_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 				ret = read_sub_irq_data(data, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 				if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 					dev_err(map->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 						"Failed to read IRQ status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 						ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 					goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	} else if (!map->use_single_read && map->reg_stride == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		   data->irq_reg_stride == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		u8 *buf8 = data->status_reg_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		u16 *buf16 = data->status_reg_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		u32 *buf32 = data->status_reg_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		BUG_ON(!data->status_reg_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		ret = regmap_bulk_read(map, chip->status_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 				       data->status_reg_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 				       chip->num_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			dev_err(map->dev, "Failed to read IRQ status: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		for (i = 0; i < data->chip->num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 			switch (map->format.val_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 			case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 				data->status_buf[i] = buf8[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 			case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 				data->status_buf[i] = buf16[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 			case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 				data->status_buf[i] = buf32[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 				BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 				goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		for (i = 0; i < data->chip->num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 			ret = regmap_read(map, chip->status_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 					  (i * map->reg_stride
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 					   * data->irq_reg_stride),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 					  &data->status_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 			if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 				dev_err(map->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 					"Failed to read IRQ status: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 					ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 				goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	 * Ignore masked IRQs and ack if we need to; we ack early so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	 * there is no race between handling and acknowleding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	 * interrupt.  We assume that typically few of the interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	 * will fire simultaneously so don't worry about overhead from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	 * doing a write per register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	for (i = 0; i < data->chip->num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		data->status_buf[i] &= ~data->mask_buf[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 			reg = chip->ack_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 				(i * map->reg_stride * data->irq_reg_stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 			if (chip->ack_invert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 				ret = regmap_write(map, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 						~data->status_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 				ret = regmap_write(map, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 						data->status_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 			if (chip->clear_ack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 				if (chip->ack_invert && !ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 					ret = regmap_write(map, reg, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 				else if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 					ret = regmap_write(map, reg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 			if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 					reg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	for (i = 0; i < chip->num_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		if (data->status_buf[chip->irqs[i].reg_offset /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 				     map->reg_stride] & chip->irqs[i].mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 			handle_nested_irq(irq_find_mapping(data->domain, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 			handled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	if (chip->runtime_pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		pm_runtime_put(map->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	if (chip->handle_post_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		chip->handle_post_irq(chip->irq_drv_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	if (handled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 			  irq_hw_number_t hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	struct regmap_irq_chip_data *data = h->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	irq_set_chip_data(virq, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	irq_set_chip(virq, &data->irq_chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	irq_set_nested_thread(virq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	irq_set_parent(virq, data->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	irq_set_noprobe(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) static const struct irq_domain_ops regmap_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	.map	= regmap_irq_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	.xlate	= irq_domain_xlate_onetwocell,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561)  * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563)  * @fwnode: The firmware node where the IRQ domain should be added to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564)  * @map: The regmap for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565)  * @irq: The IRQ the device uses to signal interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566)  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567)  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568)  * @chip: Configuration for the interrupt controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569)  * @data: Runtime data structure for the controller, allocated on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571)  * Returns 0 on success or an errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573)  * In order for this to be efficient the chip really should use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574)  * register cache.  The chip driver is responsible for restoring the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575)  * register values used by the IRQ controller over suspend and resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 			       struct regmap *map, int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 			       int irq_flags, int irq_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 			       const struct regmap_irq_chip *chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 			       struct regmap_irq_chip_data **data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	struct regmap_irq_chip_data *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	int num_type_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	u32 unmask_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	if (chip->num_regs <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	for (i = 0; i < chip->num_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		if (chip->irqs[i].reg_offset % map->reg_stride)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		if (chip->irqs[i].reg_offset / map->reg_stride >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		    chip->num_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	if (irq_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		if (irq_base < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 				 irq_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 			return irq_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	d = kzalloc(sizeof(*d), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	if (chip->num_main_regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		d->main_status_buf = kcalloc(chip->num_main_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 					     sizeof(unsigned int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 					     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		if (!d->main_status_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 			goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	if (!d->status_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 			      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	if (!d->mask_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 				  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	if (!d->mask_buf_def)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	if (chip->wake_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 				      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		if (!d->wake_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	if (num_type_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		d->type_buf_def = kcalloc(num_type_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 					  sizeof(unsigned int), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		if (!d->type_buf_def)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 			goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 				      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		if (!d->type_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	d->irq_chip = regmap_irq_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	d->irq_chip.name = chip->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	d->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	d->map = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	d->chip = chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	d->irq_base = irq_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	if (chip->irq_reg_stride)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		d->irq_reg_stride = chip->irq_reg_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		d->irq_reg_stride = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	if (chip->type_reg_stride)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		d->type_reg_stride = chip->type_reg_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		d->type_reg_stride = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	if (!map->use_single_read && map->reg_stride == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	    d->irq_reg_stride == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		d->status_reg_buf = kmalloc_array(chip->num_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 						  map->format.val_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 						  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		if (!d->status_reg_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	mutex_init(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	for (i = 0; i < chip->num_irqs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			|= chip->irqs[i].mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	/* Mask all the interrupts by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	for (i = 0; i < chip->num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		d->mask_buf[i] = d->mask_buf_def[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		if (!chip->mask_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 		reg = chip->mask_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 			(i * map->reg_stride * d->irq_reg_stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		if (chip->mask_invert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 			ret = regmap_irq_update_bits(d, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 					 d->mask_buf[i], ~d->mask_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		else if (d->chip->unmask_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 			unmask_offset = d->chip->unmask_base -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 					d->chip->mask_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			ret = regmap_irq_update_bits(d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 					reg + unmask_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 					d->mask_buf[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 					d->mask_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			ret = regmap_irq_update_bits(d, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 					 d->mask_buf[i], d->mask_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 				reg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 			goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		if (!chip->init_ack_masked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		/* Ack masked but set interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		reg = chip->status_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			(i * map->reg_stride * d->irq_reg_stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		ret = regmap_read(map, reg, &d->status_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 			dev_err(map->dev, "Failed to read IRQ status: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 			goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 			reg = chip->ack_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 				(i * map->reg_stride * d->irq_reg_stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			if (chip->ack_invert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 				ret = regmap_write(map, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 					~(d->status_buf[i] & d->mask_buf[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 				ret = regmap_write(map, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 					d->status_buf[i] & d->mask_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			if (chip->clear_ack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 				if (chip->ack_invert && !ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 					ret = regmap_write(map, reg, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 				else if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 					ret = regmap_write(map, reg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 			if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 					reg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 				goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	/* Wake is disabled by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	if (d->wake_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		for (i = 0; i < chip->num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 			d->wake_buf[i] = d->mask_buf_def[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			reg = chip->wake_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 				(i * map->reg_stride * d->irq_reg_stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			if (chip->wake_invert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 				ret = regmap_irq_update_bits(d, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 							 d->mask_buf_def[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 							 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 				ret = regmap_irq_update_bits(d, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 							 d->mask_buf_def[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 							 d->wake_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 					reg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 				goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	if (chip->num_type_reg && !chip->type_in_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		for (i = 0; i < chip->num_type_reg; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			reg = chip->type_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 				(i * map->reg_stride * d->type_reg_stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			ret = regmap_read(map, reg, &d->type_buf_def[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			if (d->chip->type_invert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 				d->type_buf_def[i] = ~d->type_buf_def[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 				dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 					reg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 				goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	if (irq_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		d->domain = irq_domain_add_legacy(to_of_node(fwnode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 						  chip->num_irqs, irq_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 						  0, &regmap_domain_ops, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		d->domain = irq_domain_add_linear(to_of_node(fwnode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 						  chip->num_irqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 						  &regmap_domain_ops, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	if (!d->domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		dev_err(map->dev, "Failed to create IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 				   irq_flags | IRQF_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 				   chip->name, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			irq, chip->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		goto err_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	*data = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) err_domain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	/* Should really dispose of the domain but... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) err_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	kfree(d->type_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	kfree(d->type_buf_def);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	kfree(d->wake_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	kfree(d->mask_buf_def);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	kfree(d->mask_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	kfree(d->status_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	kfree(d->status_reg_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842)  * @map: The regmap for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843)  * @irq: The IRQ the device uses to signal interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845)  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846)  * @chip: Configuration for the interrupt controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847)  * @data: Runtime data structure for the controller, allocated on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849)  * Returns 0 on success or an errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851)  * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852)  * node of the regmap is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			int irq_base, const struct regmap_irq_chip *chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			struct regmap_irq_chip_data **data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 					  irq_flags, irq_base, chip, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  * @irq: Primary IRQ for the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  * This function also disposes of all mapped IRQs on the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	unsigned int virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	int hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	free_irq(irq, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	/* Dispose all virtual irq from irq domain before removing it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		/* Ignore hwirq if holes in the IRQ list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		if (!d->chip->irqs[hwirq].mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		 * Find the virtual irq of hwirq on chip and if it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		 * there then dispose it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		virq = irq_find_mapping(d->domain, hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		if (virq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			irq_dispose_mapping(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	irq_domain_remove(d->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	kfree(d->type_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	kfree(d->type_buf_def);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	kfree(d->wake_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	kfree(d->mask_buf_def);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	kfree(d->mask_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	kfree(d->status_reg_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	kfree(d->status_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static void devm_regmap_irq_chip_release(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	regmap_del_irq_chip(d->irq, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
/*
 * devres match callback: true when the devres entry holds @data.
 *
 * WARN_ON() returns the truth value of its condition, so the sanity
 * check and the early bail-out can share a single evaluation instead
 * of testing "!r || !*r" twice.
 */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	/* A devres entry without chip data indicates an internal bug */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * @dev: The device pointer on which irq_chip belongs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * @fwnode: The firmware node where the IRQ domain should be added to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * @map: The regmap for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * @irq: The IRQ the device uses to signal interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * @chip: Configuration for the interrupt controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * @data: Runtime data structure for the controller, allocated on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  * Returns 0 on success or an errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  * The &regmap_irq_chip_data will be automatically released when the device is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  * unbound.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) int devm_regmap_add_irq_chip_fwnode(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 				    struct fwnode_handle *fwnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 				    struct regmap *map, int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 				    int irq_flags, int irq_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 				    const struct regmap_irq_chip *chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				    struct regmap_irq_chip_data **data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	struct regmap_irq_chip_data **ptr, *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 					 chip, &d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		devres_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	*ptr = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	devres_add(dev, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	*data = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) /**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  * @dev: The device pointer on which irq_chip belongs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  * @map: The regmap for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  * @irq: The IRQ the device uses to signal interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979)  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980)  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  * @chip: Configuration for the interrupt controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  * @data: Runtime data structure for the controller, allocated on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  * Returns 0 on success or an errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  * The &regmap_irq_chip_data will be automatically released when the device is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  * unbound.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			     int irq_flags, int irq_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			     const struct regmap_irq_chip *chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			     struct regmap_irq_chip_data **data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 					       irq, irq_flags, irq_base, chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 					       data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  *
 * @dev: Device for which the resource was allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)  * @irq: Primary IRQ for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  * A resource managed version of regmap_del_irq_chip().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) void devm_regmap_del_irq_chip(struct device *dev, int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			      struct regmap_irq_chip_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	WARN_ON(irq != data->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	rc = devres_release(dev, devm_regmap_irq_chip_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			    devm_regmap_irq_chip_match, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		WARN_ON(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  * @data: regmap irq controller to operate on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * Useful for drivers to request their own IRQs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	WARN_ON(!data->irq_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	return data->irq_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  * @data: regmap irq controller to operate on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)  * @irq: index of the interrupt requested in the chip IRQs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  * Useful for drivers to request their own IRQs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	/* Handle holes in the IRQ list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	if (!data->chip->irqs[irq].mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	return irq_create_mapping(data->domain, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  * @data: regmap_irq controller to operate on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  * Useful for drivers to request their own IRQs and for integration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  * with subsystems.  For ease of integration NULL is accepted as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  * domain, allowing devices to just call this even if no domain is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)  * allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		return data->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) EXPORT_SYMBOL_GPL(regmap_irq_get_domain);