Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3) // Register map access API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) // Copyright 2011 Wolfson Microelectronics plc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/property.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/rbtree.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/log2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/hwspinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #define CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include "internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * Sometimes for failures during very early init the trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  * infrastructure isn't available early enough to be used.  For this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * sort of problem defining LOG_DEVICE will add printks for basic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  * register I/O on a specific device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #undef LOG_DEVICE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
#ifdef LOG_DEVICE
/*
 * Log only for the device whose dev_name() matches the LOG_DEVICE
 * string (see the comment above the #undef LOG_DEVICE earlier in
 * this file).
 */
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
/* LOG_DEVICE not defined: the printk-based logging is compiled out. */
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 
/*
 * Forward declarations: these helpers are referenced before their
 * definitions, which appear later in this file.
 */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) bool regmap_reg_in_ranges(unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 			  const struct regmap_range *ranges,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 			  unsigned int nranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	const struct regmap_range *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	for (i = 0, r = ranges; i < nranges; i++, r++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 		if (regmap_reg_in_range(reg, r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) bool regmap_check_range_table(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 			      const struct regmap_access_table *table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	/* Check "no ranges" first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	/* In case zero "yes ranges" are supplied, any reg is OK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	if (!table->n_yes_ranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	return regmap_reg_in_ranges(reg, table->yes_ranges,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 				    table->n_yes_ranges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) EXPORT_SYMBOL_GPL(regmap_check_range_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) bool regmap_writeable(struct regmap *map, unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	if (map->max_register && reg > map->max_register)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	if (map->writeable_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 		return map->writeable_reg(map->dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	if (map->wr_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 		return regmap_check_range_table(map, reg, map->wr_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) bool regmap_cached(struct regmap *map, unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	if (map->cache_type == REGCACHE_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	if (!map->cache_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	if (map->max_register && reg > map->max_register)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	ret = regcache_read(map, reg, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) bool regmap_readable(struct regmap *map, unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	if (!map->reg_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	if (map->max_register && reg > map->max_register)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	if (map->format.format_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	if (map->readable_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 		return map->readable_reg(map->dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	if (map->rd_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 		return regmap_check_range_table(map, reg, map->rd_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) bool regmap_volatile(struct regmap *map, unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	if (!map->format.format_write && !regmap_readable(map, reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	if (map->volatile_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 		return map->volatile_reg(map->dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	if (map->volatile_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 		return regmap_check_range_table(map, reg, map->volatile_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	if (map->cache_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) bool regmap_precious(struct regmap *map, unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	if (!regmap_readable(map, reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	if (map->precious_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 		return map->precious_reg(map->dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	if (map->precious_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 		return regmap_check_range_table(map, reg, map->precious_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	if (map->writeable_noinc_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 		return map->writeable_noinc_reg(map->dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 	if (map->wr_noinc_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 		return regmap_check_range_table(map, reg, map->wr_noinc_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	if (map->readable_noinc_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 		return map->readable_noinc_reg(map->dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	if (map->rd_noinc_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 		return regmap_check_range_table(map, reg, map->rd_noinc_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	size_t num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	for (i = 0; i < num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) static void regmap_format_12_20_write(struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 				     unsigned int reg, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	u8 *out = map->work_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	out[0] = reg >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	out[1] = (reg << 4) | (val >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	out[2] = val >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	out[3] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) static void regmap_format_2_6_write(struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 				     unsigned int reg, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	u8 *out = map->work_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	*out = (reg << 6) | val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 
/*
 * Pack a 4-bit register and 12-bit value into one big-endian 16-bit
 * word in the work buffer (register in the top nibble).
 */
static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
/*
 * Pack a 7-bit register and 9-bit value into one big-endian 16-bit
 * word in the work buffer (register in the top seven bits).
 */
static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) static void regmap_format_10_14_write(struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 				    unsigned int reg, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	u8 *out = map->work_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	out[2] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	out[1] = (val >> 8) | (reg << 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	out[0] = reg >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	u8 *b = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	b[0] = val << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 
/* Big-endian 16-bit store to a possibly unaligned buffer. */
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 
/* Little-endian 16-bit store to a possibly unaligned buffer. */
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 
/* CPU-endian 16-bit store; memcpy() avoids alignment requirements. */
static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	u8 *b = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	val <<= shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	b[0] = val >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	b[1] = val >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	b[2] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
/* Big-endian 32-bit store to a possibly unaligned buffer. */
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 
/* Little-endian 32-bit store to a possibly unaligned buffer. */
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 
/* CPU-endian 32-bit store; memcpy() avoids alignment requirements. */
static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
#ifdef CONFIG_64BIT
/*
 * 64-bit formatters, only built on 64-bit kernels.  The value is
 * widened to u64 before shifting so the shift cannot lose bits.
 */

/* Big-endian 64-bit store to a possibly unaligned buffer. */
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be64((u64) val << shift, buf);
}

/* Little-endian 64-bit store to a possibly unaligned buffer. */
static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le64((u64) val << shift, buf);
}

/* CPU-endian 64-bit store; memcpy() avoids alignment requirements. */
static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u64 v = (u64) val << shift;

	memcpy(buf, &v, sizeof(v));
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 
/* In-place parse hook for formats already in CPU order: nothing to do. */
static void regmap_parse_inplace_noop(void *buf)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) static unsigned int regmap_parse_8(const void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	const u8 *b = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	return b[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 
/* Big-endian 16-bit load from a possibly unaligned buffer. */
static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
/* Little-endian 16-bit load from a possibly unaligned buffer. */
static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
/* Convert a big-endian 16-bit value to CPU order in place. */
static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
/* Convert a little-endian 16-bit value to CPU order in place. */
static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 
/* CPU-endian 16-bit load; memcpy() avoids alignment requirements. */
static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) static unsigned int regmap_parse_24(const void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	const u8 *b = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	unsigned int ret = b[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	ret |= ((unsigned int)b[1]) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	ret |= ((unsigned int)b[0]) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
/* Big-endian 32-bit load from a possibly unaligned buffer. */
static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
/* Little-endian 32-bit load from a possibly unaligned buffer. */
static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
/* Convert a big-endian 32-bit value to CPU order in place. */
static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 
/* Convert a little-endian 32-bit value to CPU order in place. */
static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
/* CPU-endian 32-bit load; memcpy() avoids alignment requirements. */
static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 
#ifdef CONFIG_64BIT
/*
 * 64-bit parsers, only built on 64-bit kernels.  Note the return type
 * is unsigned int, so callers see only the low bits of wider values.
 */

/* Big-endian 64-bit load from a possibly unaligned buffer. */
static unsigned int regmap_parse_64_be(const void *buf)
{
	return get_unaligned_be64(buf);
}

/* Little-endian 64-bit load from a possibly unaligned buffer. */
static unsigned int regmap_parse_64_le(const void *buf)
{
	return get_unaligned_le64(buf);
}

/* Convert a big-endian 64-bit value to CPU order in place. */
static void regmap_parse_64_be_inplace(void *buf)
{
	u64 v =  get_unaligned_be64(buf);

	memcpy(buf, &v, sizeof(v));
}

/* Convert a little-endian 64-bit value to CPU order in place. */
static void regmap_parse_64_le_inplace(void *buf)
{
	u64 v = get_unaligned_le64(buf);

	memcpy(buf, &v, sizeof(v));
}

/* CPU-endian 64-bit load; memcpy() avoids alignment requirements. */
static unsigned int regmap_parse_64_native(const void *buf)
{
	u64 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
/*
 * Lock callback backed by a hardware spinlock.  The UINT_MAX timeout
 * makes this effectively wait forever; the return value is ignored.
 */
static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
/*
 * As regmap_lock_hwlock() but also disables local interrupts while
 * the hardware spinlock is held.
 */
static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 
/*
 * As regmap_lock_hwlock() but saves the interrupt state into
 * map->spinlock_flags for the matching irqrestore unlock.
 */
static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) static void regmap_unlock_hwlock(void *__map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	struct regmap *map = __map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	hwspin_unlock(map->hwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) static void regmap_unlock_hwlock_irq(void *__map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	struct regmap *map = __map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	hwspin_unlock_irq(map->hwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) static void regmap_unlock_hwlock_irqrestore(void *__map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	struct regmap *map = __map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 
/*
 * No-op lock/unlock callback, installed for both map->lock and
 * map->unlock when config->disable_locking is set.
 */
static void regmap_lock_unlock_none(void *__map)
{

}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) static void regmap_lock_mutex(void *__map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	struct regmap *map = __map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	mutex_lock(&map->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) static void regmap_unlock_mutex(void *__map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	struct regmap *map = __map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	mutex_unlock(&map->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) static void regmap_lock_spinlock(void *__map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) __acquires(&map->spinlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	struct regmap *map = __map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	spin_lock_irqsave(&map->spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	map->spinlock_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) static void regmap_unlock_spinlock(void *__map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) __releases(&map->spinlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	struct regmap *map = __map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
/* devres release callback for the dev_get_regmap() lookup resource. */
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) static bool _regmap_range_add(struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 			      struct regmap_range_node *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	struct rb_root *root = &map->range_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	struct rb_node **new = &(root->rb_node), *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	while (*new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		struct regmap_range_node *this =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 			rb_entry(*new, struct regmap_range_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		parent = *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		if (data->range_max < this->range_min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 			new = &((*new)->rb_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		else if (data->range_min > this->range_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 			new = &((*new)->rb_right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	rb_link_node(&data->node, parent, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	rb_insert_color(&data->node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 						      unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	struct rb_node *node = map->range_tree.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		struct regmap_range_node *this =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 			rb_entry(node, struct regmap_range_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		if (reg < this->range_min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 			node = node->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 		else if (reg > this->range_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 			node = node->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 			return this;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) static void regmap_range_exit(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	struct rb_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	struct regmap_range_node *range_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	next = rb_first(&map->range_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	while (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		range_node = rb_entry(next, struct regmap_range_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		next = rb_next(&range_node->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		rb_erase(&range_node->node, &map->range_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		kfree(range_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	kfree(map->selector_work_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	if (config->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		const char *name = kstrdup_const(config->name, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		kfree_const(map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		map->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) int regmap_attach_dev(struct device *dev, struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		      const struct regmap_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	struct regmap **m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	map->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	ret = regmap_set_name(map, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	regmap_debugfs_exit(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	regmap_debugfs_init(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	/* Add a devres resource for dev_get_regmap() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	if (!m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		regmap_debugfs_exit(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	*m = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	devres_add(dev, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) EXPORT_SYMBOL_GPL(regmap_attach_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 					const struct regmap_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	enum regmap_endian endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	/* Retrieve the endianness specification from the regmap config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	endian = config->reg_format_endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	/* If the regmap config specified a non-default value, use that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	if (endian != REGMAP_ENDIAN_DEFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		return endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	/* Retrieve the endianness specification from the bus config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	if (bus && bus->reg_format_endian_default)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		endian = bus->reg_format_endian_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	/* If the bus specified a non-default value, use that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	if (endian != REGMAP_ENDIAN_DEFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		return endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	/* Use this if no other value was found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	return REGMAP_ENDIAN_BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) enum regmap_endian regmap_get_val_endian(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 					 const struct regmap_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 					 const struct regmap_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	enum regmap_endian endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	/* Retrieve the endianness specification from the regmap config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	endian = config->val_format_endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	/* If the regmap config specified a non-default value, use that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	if (endian != REGMAP_ENDIAN_DEFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		return endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	/* If the firmware node exist try to get endianness from it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	if (fwnode_property_read_bool(fwnode, "big-endian"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		endian = REGMAP_ENDIAN_BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	else if (fwnode_property_read_bool(fwnode, "little-endian"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		endian = REGMAP_ENDIAN_LITTLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	else if (fwnode_property_read_bool(fwnode, "native-endian"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		endian = REGMAP_ENDIAN_NATIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	/* If the endianness was specified in fwnode, use that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	if (endian != REGMAP_ENDIAN_DEFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		return endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	/* Retrieve the endianness specification from the bus config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	if (bus && bus->val_format_endian_default)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		endian = bus->val_format_endian_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	/* If the bus specified a non-default value, use that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	if (endian != REGMAP_ENDIAN_DEFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		return endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	/* Use this if no other value was found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	return REGMAP_ENDIAN_BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) EXPORT_SYMBOL_GPL(regmap_get_val_endian);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) struct regmap *__regmap_init(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 			     const struct regmap_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			     void *bus_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 			     const struct regmap_config *config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 			     struct lock_class_key *lock_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			     const char *lock_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	struct regmap *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	enum regmap_endian reg_endian, val_endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	if (!config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	map = kzalloc(sizeof(*map), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	if (map == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	ret = regmap_set_name(map, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		goto err_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	ret = -EINVAL; /* Later error paths rely on this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	if (config->disable_locking) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		map->lock = map->unlock = regmap_lock_unlock_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		map->can_sleep = config->can_sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		regmap_debugfs_disable(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	} else if (config->lock && config->unlock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		map->lock = config->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		map->unlock = config->unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		map->lock_arg = config->lock_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		map->can_sleep = config->can_sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	} else if (config->use_hwlock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		if (!map->hwlock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			goto err_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		switch (config->hwlock_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		case HWLOCK_IRQSTATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 			map->lock = regmap_lock_hwlock_irqsave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 			map->unlock = regmap_unlock_hwlock_irqrestore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		case HWLOCK_IRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 			map->lock = regmap_lock_hwlock_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			map->unlock = regmap_unlock_hwlock_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 			map->lock = regmap_lock_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			map->unlock = regmap_unlock_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		map->lock_arg = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		if ((bus && bus->fast_io) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		    config->fast_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			spin_lock_init(&map->spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			map->lock = regmap_lock_spinlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			map->unlock = regmap_unlock_spinlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			lockdep_set_class_and_name(&map->spinlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 						   lock_key, lock_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			mutex_init(&map->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			map->lock = regmap_lock_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			map->unlock = regmap_unlock_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			map->can_sleep = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			lockdep_set_class_and_name(&map->mutex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 						   lock_key, lock_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		map->lock_arg = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	 * When we write in fast-paths with regmap_bulk_write() don't allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	 * scratch buffers with sleeping allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	if ((bus && bus->fast_io) || config->fast_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		map->alloc_flags = GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		map->alloc_flags = GFP_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	map->format.pad_bytes = config->pad_bits / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			config->val_bits + config->pad_bits, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	map->reg_shift = config->pad_bits % 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (config->reg_stride)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		map->reg_stride = config->reg_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		map->reg_stride = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (is_power_of_2(map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		map->reg_stride_order = ilog2(map->reg_stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		map->reg_stride_order = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	map->use_single_read = config->use_single_read || !bus || !bus->read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	map->use_single_write = config->use_single_write || !bus || !bus->write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	map->can_multi_write = config->can_multi_write && bus && bus->write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		map->max_raw_read = bus->max_raw_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		map->max_raw_write = bus->max_raw_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	map->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	map->bus = bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	map->bus_context = bus_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	map->max_register = config->max_register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	map->wr_table = config->wr_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	map->rd_table = config->rd_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	map->volatile_table = config->volatile_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	map->precious_table = config->precious_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	map->wr_noinc_table = config->wr_noinc_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	map->rd_noinc_table = config->rd_noinc_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	map->writeable_reg = config->writeable_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	map->readable_reg = config->readable_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	map->volatile_reg = config->volatile_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	map->precious_reg = config->precious_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	map->writeable_noinc_reg = config->writeable_noinc_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	map->readable_noinc_reg = config->readable_noinc_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	map->cache_type = config->cache_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	spin_lock_init(&map->async_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	INIT_LIST_HEAD(&map->async_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	INIT_LIST_HEAD(&map->async_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	init_waitqueue_head(&map->async_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	if (config->read_flag_mask ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	    config->write_flag_mask ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	    config->zero_flag_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		map->read_flag_mask = config->read_flag_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		map->write_flag_mask = config->write_flag_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	} else if (bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		map->read_flag_mask = bus->read_flag_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	if (!bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		map->reg_read  = config->reg_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		map->reg_write = config->reg_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		map->defer_caching = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		goto skip_format_initialization;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	} else if (!bus->read || !bus->write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		map->reg_read = _regmap_bus_reg_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		map->reg_write = _regmap_bus_reg_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		map->reg_update_bits = bus->reg_update_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		map->defer_caching = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		goto skip_format_initialization;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		map->reg_read  = _regmap_bus_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		map->reg_update_bits = bus->reg_update_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	reg_endian = regmap_get_reg_endian(bus, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	val_endian = regmap_get_val_endian(dev, bus, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	switch (config->reg_bits + map->reg_shift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		switch (config->val_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			map->format.format_write = regmap_format_2_6_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		switch (config->val_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			map->format.format_write = regmap_format_4_12_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		switch (config->val_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		case 9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			map->format.format_write = regmap_format_7_9_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	case 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		switch (config->val_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		case 14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			map->format.format_write = regmap_format_10_14_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		switch (config->val_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		case 20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			map->format.format_write = regmap_format_12_20_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		map->format.format_reg = regmap_format_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		switch (reg_endian) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		case REGMAP_ENDIAN_BIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			map->format.format_reg = regmap_format_16_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		case REGMAP_ENDIAN_LITTLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			map->format.format_reg = regmap_format_16_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		case REGMAP_ENDIAN_NATIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 			map->format.format_reg = regmap_format_16_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	case 24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		if (reg_endian != REGMAP_ENDIAN_BIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		map->format.format_reg = regmap_format_24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		switch (reg_endian) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		case REGMAP_ENDIAN_BIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			map->format.format_reg = regmap_format_32_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		case REGMAP_ENDIAN_LITTLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			map->format.format_reg = regmap_format_32_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		case REGMAP_ENDIAN_NATIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 			map->format.format_reg = regmap_format_32_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		switch (reg_endian) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		case REGMAP_ENDIAN_BIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			map->format.format_reg = regmap_format_64_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		case REGMAP_ENDIAN_LITTLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			map->format.format_reg = regmap_format_64_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		case REGMAP_ENDIAN_NATIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			map->format.format_reg = regmap_format_64_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (val_endian == REGMAP_ENDIAN_NATIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		map->format.parse_inplace = regmap_parse_inplace_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	switch (config->val_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		map->format.format_val = regmap_format_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		map->format.parse_val = regmap_parse_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		map->format.parse_inplace = regmap_parse_inplace_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		switch (val_endian) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		case REGMAP_ENDIAN_BIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			map->format.format_val = regmap_format_16_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			map->format.parse_val = regmap_parse_16_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			map->format.parse_inplace = regmap_parse_16_be_inplace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		case REGMAP_ENDIAN_LITTLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			map->format.format_val = regmap_format_16_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			map->format.parse_val = regmap_parse_16_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 			map->format.parse_inplace = regmap_parse_16_le_inplace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		case REGMAP_ENDIAN_NATIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 			map->format.format_val = regmap_format_16_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			map->format.parse_val = regmap_parse_16_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	case 24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		if (val_endian != REGMAP_ENDIAN_BIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		map->format.format_val = regmap_format_24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		map->format.parse_val = regmap_parse_24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		switch (val_endian) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		case REGMAP_ENDIAN_BIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			map->format.format_val = regmap_format_32_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 			map->format.parse_val = regmap_parse_32_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			map->format.parse_inplace = regmap_parse_32_be_inplace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		case REGMAP_ENDIAN_LITTLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			map->format.format_val = regmap_format_32_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			map->format.parse_val = regmap_parse_32_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			map->format.parse_inplace = regmap_parse_32_le_inplace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		case REGMAP_ENDIAN_NATIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			map->format.format_val = regmap_format_32_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			map->format.parse_val = regmap_parse_32_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		switch (val_endian) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		case REGMAP_ENDIAN_BIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			map->format.format_val = regmap_format_64_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			map->format.parse_val = regmap_parse_64_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			map->format.parse_inplace = regmap_parse_64_be_inplace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		case REGMAP_ENDIAN_LITTLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 			map->format.format_val = regmap_format_64_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			map->format.parse_val = regmap_parse_64_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			map->format.parse_inplace = regmap_parse_64_le_inplace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		case REGMAP_ENDIAN_NATIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			map->format.format_val = regmap_format_64_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			map->format.parse_val = regmap_parse_64_native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (map->format.format_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		    (val_endian != REGMAP_ENDIAN_BIG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		map->use_single_write = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	if (!map->format.format_write &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	    !(map->format.format_reg && map->format.format_val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (map->work_buf == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		goto err_hwlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	if (map->format.format_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		map->defer_caching = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		map->reg_write = _regmap_bus_formatted_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	} else if (map->format.format_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		map->defer_caching = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		map->reg_write = _regmap_bus_raw_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) skip_format_initialization:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	map->range_tree = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	for (i = 0; i < config->num_ranges; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		struct regmap_range_node *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		/* Sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		if (range_cfg->range_max < range_cfg->range_min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 				range_cfg->range_max, range_cfg->range_min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			goto err_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		if (range_cfg->range_max > map->max_register) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				range_cfg->range_max, map->max_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			goto err_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		if (range_cfg->selector_reg > map->max_register) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			dev_err(map->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				"Invalid range %d: selector out of map\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			goto err_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		if (range_cfg->window_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			dev_err(map->dev, "Invalid range %d: window_len 0\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 				i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			goto err_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		/* Make sure, that this register range has no selector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		   or data window within its boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		for (j = 0; j < config->num_ranges; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			unsigned sel_reg = config->ranges[j].selector_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			unsigned win_min = config->ranges[j].window_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			unsigned win_max = win_min +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 					   config->ranges[j].window_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			/* Allow data window inside its own virtual range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 			if (j == i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			if (range_cfg->range_min <= sel_reg &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			    sel_reg <= range_cfg->range_max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 				dev_err(map->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 					"Range %d: selector for %d in window\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 					i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 				goto err_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 			if (!(win_max < range_cfg->range_min ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 			      win_min > range_cfg->range_max)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 				dev_err(map->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 					"Range %d: window for %d in window\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 					i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 				goto err_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		new = kzalloc(sizeof(*new), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		if (new == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			goto err_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		new->map = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		new->name = range_cfg->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		new->range_min = range_cfg->range_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		new->range_max = range_cfg->range_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		new->selector_reg = range_cfg->selector_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		new->selector_mask = range_cfg->selector_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		new->selector_shift = range_cfg->selector_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		new->window_start = range_cfg->window_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		new->window_len = range_cfg->window_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		if (!_regmap_range_add(map, new)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			dev_err(map->dev, "Failed to add range %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			kfree(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			goto err_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		if (map->selector_work_buf == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			map->selector_work_buf =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 				kzalloc(map->format.buf_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			if (map->selector_work_buf == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 				goto err_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	ret = regcache_init(map, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		goto err_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		ret = regmap_attach_dev(dev, map, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			goto err_regcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		regmap_debugfs_init(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) err_regcache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	regcache_exit(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) err_range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	regmap_range_exit(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	kfree(map->work_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) err_hwlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (map->hwlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		hwspin_lock_free(map->hwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) err_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	kfree_const(map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) err_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	kfree(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) EXPORT_SYMBOL_GPL(__regmap_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void devm_regmap_release(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	regmap_exit(*(struct regmap **)res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct regmap *__devm_regmap_init(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 				  const struct regmap_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 				  void *bus_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 				  const struct regmap_config *config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 				  struct lock_class_key *lock_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 				  const char *lock_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct regmap **ptr, *regmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	regmap = __regmap_init(dev, bus, bus_context, config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 			       lock_key, lock_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	if (!IS_ERR(regmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		*ptr = regmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		devres_add(dev, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		devres_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	return regmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) EXPORT_SYMBOL_GPL(__devm_regmap_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static void regmap_field_init(struct regmap_field *rm_field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	struct regmap *regmap, struct reg_field reg_field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	rm_field->regmap = regmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	rm_field->reg = reg_field.reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	rm_field->shift = reg_field.lsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	rm_field->id_size = reg_field.id_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	rm_field->id_offset = reg_field.id_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  * devm_regmap_field_alloc() - Allocate and initialise a register field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  * @dev: Device that will be interacted with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  * @regmap: regmap bank in which this register field is located.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  * @reg_field: Register field with in the bank.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  * The return value will be an ERR_PTR() on error or a valid pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)  * to a struct regmap_field. The regmap_field will be automatically freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)  * by the device management code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct regmap_field *devm_regmap_field_alloc(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		struct regmap *regmap, struct reg_field reg_field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	struct regmap_field *rm_field = devm_kzalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 					sizeof(*rm_field), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	if (!rm_field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	regmap_field_init(rm_field, regmap, reg_field);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	return rm_field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)  * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)  * @regmap: regmap bank in which this register field is located.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)  * @rm_field: regmap register fields within the bank.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)  * @reg_field: Register fields within the bank.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)  * @num_fields: Number of register fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)  * The return value will be an -ENOMEM on error or zero for success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)  * Newly allocated regmap_fields should be freed by calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)  * regmap_field_bulk_free()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) int regmap_field_bulk_alloc(struct regmap *regmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			    struct regmap_field **rm_field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 			    struct reg_field *reg_field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 			    int num_fields)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	struct regmap_field *rf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (!rf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	for (i = 0; i < num_fields; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		regmap_field_init(&rf[i], regmap, reg_field[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		rm_field[i] = &rf[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)  * devm_regmap_field_bulk_alloc() - Allocate and initialise a bulk register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)  * fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)  * @dev: Device that will be interacted with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)  * @regmap: regmap bank in which this register field is located.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)  * @rm_field: regmap register fields within the bank.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)  * @reg_field: Register fields within the bank.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)  * @num_fields: Number of register fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)  * The return value will be an -ENOMEM on error or zero for success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)  * Newly allocated regmap_fields will be automatically freed by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)  * device management code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) int devm_regmap_field_bulk_alloc(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 				 struct regmap *regmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 				 struct regmap_field **rm_field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 				 struct reg_field *reg_field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 				 int num_fields)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	struct regmap_field *rf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	if (!rf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	for (i = 0; i < num_fields; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		regmap_field_init(&rf[i], regmap, reg_field[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		rm_field[i] = &rf[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)  * regmap_field_bulk_free() - Free register field allocated using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)  *                       regmap_field_bulk_alloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)  * @field: regmap fields which should be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) void regmap_field_bulk_free(struct regmap_field *field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	kfree(field);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) EXPORT_SYMBOL_GPL(regmap_field_bulk_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)  * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)  *                            devm_regmap_field_bulk_alloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)  * @dev: Device that will be interacted with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)  * @field: regmap field which should be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)  * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)  * drivers need not call this function, as the memory allocated via devm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)  * will be freed as per device-driver life-cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) void devm_regmap_field_bulk_free(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 				 struct regmap_field *field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	devm_kfree(dev, field);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  * devm_regmap_field_free() - Free a register field allocated using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  *                            devm_regmap_field_alloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)  * @dev: Device that will be interacted with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  * @field: regmap field which should be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  * Free register field allocated using devm_regmap_field_alloc(). Usually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  * drivers need not call this function, as the memory allocated via devm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)  * will be freed as per device-driver life-cyle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) void devm_regmap_field_free(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	struct regmap_field *field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	devm_kfree(dev, field);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) EXPORT_SYMBOL_GPL(devm_regmap_field_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)  * regmap_field_alloc() - Allocate and initialise a register field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)  * @regmap: regmap bank in which this register field is located.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)  * @reg_field: Register field with in the bank.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)  * The return value will be an ERR_PTR() on error or a valid pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)  * to a struct regmap_field. The regmap_field should be freed by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  * user once its finished working with it using regmap_field_free().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct regmap_field *regmap_field_alloc(struct regmap *regmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		struct reg_field reg_field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	if (!rm_field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	regmap_field_init(rm_field, regmap, reg_field);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	return rm_field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) EXPORT_SYMBOL_GPL(regmap_field_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)  * regmap_field_free() - Free register field allocated using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)  *                       regmap_field_alloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)  * @field: regmap field which should be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) void regmap_field_free(struct regmap_field *field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	kfree(field);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) EXPORT_SYMBOL_GPL(regmap_field_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)  * regmap_reinit_cache() - Reinitialise the current register cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)  * @map: Register map to operate on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)  * @config: New configuration.  Only the cache data will be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)  * Discard any existing register cache for the map and initialize a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)  * new cache.  This can be used to restore the cache to defaults or to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)  * update the cache configuration to reflect runtime discovery of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)  * hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)  * No explicit locking is done here, the user needs to ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)  * this function will not race with other calls to regmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	regcache_exit(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	regmap_debugfs_exit(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	map->max_register = config->max_register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	map->writeable_reg = config->writeable_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	map->readable_reg = config->readable_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	map->volatile_reg = config->volatile_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	map->precious_reg = config->precious_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	map->writeable_noinc_reg = config->writeable_noinc_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	map->readable_noinc_reg = config->readable_noinc_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	map->cache_type = config->cache_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	ret = regmap_set_name(map, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	regmap_debugfs_init(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	map->cache_bypass = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	map->cache_only = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	return regcache_init(map, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) EXPORT_SYMBOL_GPL(regmap_reinit_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)  * regmap_exit() - Free a previously allocated register map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)  * @map: Register map to operate on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) void regmap_exit(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	struct regmap_async *async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	regcache_exit(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	regmap_debugfs_exit(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	regmap_range_exit(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	if (map->bus && map->bus->free_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		map->bus->free_context(map->bus_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	kfree(map->work_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	while (!list_empty(&map->async_free)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		async = list_first_entry_or_null(&map->async_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 						 struct regmap_async,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 						 list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		list_del(&async->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		kfree(async->work_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		kfree(async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	if (map->hwlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		hwspin_lock_free(map->hwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (map->lock == regmap_lock_mutex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		mutex_destroy(&map->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	kfree_const(map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	kfree(map->patch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	kfree(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) EXPORT_SYMBOL_GPL(regmap_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) static int dev_get_regmap_match(struct device *dev, void *res, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	struct regmap **r = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (!r || !*r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		WARN_ON(!r || !*r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	/* If the user didn't specify a name match any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		return !strcmp((*r)->name, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)  * dev_get_regmap() - Obtain the regmap (if any) for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)  * @dev: Device to retrieve the map for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)  * @name: Optional name for the register map, usually NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)  * Returns the regmap for the device if one is present, or NULL.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)  * name is specified then it must match the name specified when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)  * registering the device, if it is NULL then the first regmap found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  * will be used.  Devices with multiple register maps are very rare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  * generic code should normally not need to specify a name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct regmap *dev_get_regmap(struct device *dev, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	struct regmap **r = devres_find(dev, dev_get_regmap_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 					dev_get_regmap_match, (void *)name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	return *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) EXPORT_SYMBOL_GPL(dev_get_regmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)  * regmap_get_device() - Obtain the device from a regmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)  * @map: Register map to operate on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  * Returns the underlying device that the regmap has been created for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) struct device *regmap_get_device(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	return map->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) EXPORT_SYMBOL_GPL(regmap_get_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) static int _regmap_select_page(struct regmap *map, unsigned int *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			       struct regmap_range_node *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			       unsigned int val_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	void *orig_work_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	unsigned int win_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	unsigned int win_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	bool page_chg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	win_offset = (*reg - range->range_min) % range->window_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	win_page = (*reg - range->range_min) / range->window_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (val_num > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		/* Bulk write shouldn't cross range boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		if (*reg + val_num - 1 > range->range_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		/* ... or single page boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		if (val_num > range->window_len - win_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	/* It is possible to have selector register inside data window.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	   In that case, selector register is located on every page and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	   it needs no page switching, when accessed alone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	if (val_num > 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	    range->window_start + win_offset != range->selector_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		/* Use separate work_buf during page switching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		orig_work_buf = map->work_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		map->work_buf = map->selector_work_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		ret = _regmap_update_bits(map, range->selector_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 					  range->selector_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 					  win_page << range->selector_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 					  &page_chg, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		map->work_buf = orig_work_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	*reg = range->window_start + win_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 					  unsigned long mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	u8 *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	if (!mask || !map->work_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	buf = map->work_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	for (i = 0; i < max_bytes; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		buf[i] |= (mask >> (8 * i)) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 				  const void *val, size_t val_len, bool noinc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	struct regmap_range_node *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	void *work_val = map->work_buf + map->format.reg_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		map->format.pad_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	int ret = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	WARN_ON(!map->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	/* Check for unwritable or noinc registers in range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	 * before we start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	if (!regmap_writeable_noinc(map, reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		for (i = 0; i < val_len / map->format.val_bytes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			unsigned int element =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 				reg + regmap_get_offset(map, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 			if (!regmap_writeable(map, element) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 				regmap_writeable_noinc(map, element))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	if (!map->cache_bypass && map->format.parse_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		unsigned int ival;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		int val_bytes = map->format.val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		for (i = 0; i < val_len / val_bytes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 			ival = map->format.parse_val(val + (i * val_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 			ret = regcache_write(map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 					     reg + regmap_get_offset(map, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 					     ival);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 				dev_err(map->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 					"Error in caching of register: %x ret: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 					reg + regmap_get_offset(map, i), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		if (map->cache_only) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 			map->cache_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	range = _regmap_range_lookup(map, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	if (range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		int val_num = val_len / map->format.val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		int win_offset = (reg - range->range_min) % range->window_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		int win_residue = range->window_len - win_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		/* If the write goes beyond the end of the window split it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		while (val_num > win_residue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 			dev_dbg(map->dev, "Writing window %d/%zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 				win_residue, val_len / map->format.val_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			ret = _regmap_raw_write_impl(map, reg, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 						     win_residue *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 						     map->format.val_bytes, noinc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			reg += win_residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			val_num -= win_residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			val += win_residue * map->format.val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			val_len -= win_residue * map->format.val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			win_offset = (reg - range->range_min) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 				range->window_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			win_residue = range->window_len - win_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 				      map->write_flag_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	 * Essentially all I/O mechanisms will be faster with a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	 * buffer to write.  Since register syncs often generate raw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	 * writes of single registers optimise that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	if (val != work_val && val_len == map->format.val_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		memcpy(work_val, val, map->format.val_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		val = work_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	if (map->async && map->bus->async_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		struct regmap_async *async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		trace_regmap_async_write_start(map, reg, val_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		spin_lock_irqsave(&map->async_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		async = list_first_entry_or_null(&map->async_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 						 struct regmap_async,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 						 list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		if (async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 			list_del(&async->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		spin_unlock_irqrestore(&map->async_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		if (!async) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			async = map->bus->async_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 			if (!async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 			async->work_buf = kzalloc(map->format.buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 						  GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			if (!async->work_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 				kfree(async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		async->map = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		/* If the caller supplied the value we can use it safely. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		       map->format.reg_bytes + map->format.val_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		spin_lock_irqsave(&map->async_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		list_add_tail(&async->list, &map->async_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		spin_unlock_irqrestore(&map->async_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		if (val != work_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 			ret = map->bus->async_write(map->bus_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 						    async->work_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 						    map->format.reg_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 						    map->format.pad_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 						    val, val_len, async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 			ret = map->bus->async_write(map->bus_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 						    async->work_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 						    map->format.reg_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 						    map->format.pad_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 						    val_len, NULL, 0, async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 			dev_err(map->dev, "Failed to schedule write: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 			spin_lock_irqsave(&map->async_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 			list_move(&async->list, &map->async_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 			spin_unlock_irqrestore(&map->async_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	/* If we're doing a single register write we can probably just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	 * send the work_buf directly, otherwise try to do a gather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	 * write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	if (val == work_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		ret = map->bus->write(map->bus_context, map->work_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 				      map->format.reg_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 				      map->format.pad_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 				      val_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	else if (map->bus->gather_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		ret = map->bus->gather_write(map->bus_context, map->work_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 					     map->format.reg_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 					     map->format.pad_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 					     val, val_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		ret = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	/* If that didn't work fall back on linearising by hand. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	if (ret == -ENOTSUPP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		buf = kzalloc(len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		memcpy(buf, map->work_buf, map->format.reg_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		       val, val_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		ret = map->bus->write(map->bus_context, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		/* regcache_drop_region() takes lock that we already have,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		 * thus call map->cache_ops->drop() directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		if (map->cache_ops && map->cache_ops->drop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 			map->cache_ops->drop(map, reg, reg + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)  * regmap_can_raw_write - Test if regmap_raw_write() is supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)  * @map: Map to check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) bool regmap_can_raw_write(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	return map->bus && map->bus->write && map->format.format_val &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		map->format.format_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) EXPORT_SYMBOL_GPL(regmap_can_raw_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)  * regmap_get_raw_read_max - Get the maximum size we can read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  * @map: Map to check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) size_t regmap_get_raw_read_max(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	return map->max_raw_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)  * regmap_get_raw_write_max - Get the maximum size we can read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)  * @map: Map to check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) size_t regmap_get_raw_write_max(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	return map->max_raw_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) static int _regmap_bus_formatted_write(void *context, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 				       unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	struct regmap_range_node *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	struct regmap *map = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	WARN_ON(!map->bus || !map->format.format_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	range = _regmap_range_lookup(map, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	if (range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		ret = _regmap_select_page(map, &reg, range, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	map->format.format_write(map, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	trace_regmap_hw_write_start(map, reg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	ret = map->bus->write(map->bus_context, map->work_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 			      map->format.buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	trace_regmap_hw_write_done(map, reg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) static int _regmap_bus_reg_write(void *context, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 				 unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	struct regmap *map = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	return map->bus->reg_write(map->bus_context, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) static int _regmap_bus_raw_write(void *context, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 				 unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	struct regmap *map = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	WARN_ON(!map->bus || !map->format.format_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	map->format.format_val(map->work_buf + map->format.reg_bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 			       + map->format.pad_bytes, val, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	return _regmap_raw_write_impl(map, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 				      map->work_buf +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 				      map->format.reg_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 				      map->format.pad_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 				      map->format.val_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 				      false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) static inline void *_regmap_map_get_context(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	return (map->bus) ? map : map->bus_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) int _regmap_write(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		  unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	void *context = _regmap_map_get_context(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	if (!regmap_writeable(map, reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	if (!map->cache_bypass && !map->defer_caching) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		ret = regcache_write(map, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		if (map->cache_only) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 			map->cache_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	if (regmap_should_log(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		dev_info(map->dev, "%x <= %x\n", reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	trace_regmap_reg_write(map, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	return map->reg_write(context, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)  * regmap_write() - Write a value to a single register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)  * @map: Register map to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)  * @reg: Register to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)  * @val: Value to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	if (!IS_ALIGNED(reg, map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	ret = _regmap_write(map, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) EXPORT_SYMBOL_GPL(regmap_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)  * regmap_write_async() - Write a value to a single register asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)  * @map: Register map to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)  * @reg: Register to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)  * @val: Value to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	if (!IS_ALIGNED(reg, map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	map->async = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	ret = _regmap_write(map, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	map->async = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) EXPORT_SYMBOL_GPL(regmap_write_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) int _regmap_raw_write(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		      const void *val, size_t val_len, bool noinc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	size_t val_bytes = map->format.val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	size_t val_count = val_len / val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	size_t chunk_count, chunk_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	size_t chunk_regs = val_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	if (!val_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	if (map->use_single_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		chunk_regs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	else if (map->max_raw_write && val_len > map->max_raw_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		chunk_regs = map->max_raw_write / val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	chunk_count = val_count / chunk_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	chunk_bytes = chunk_regs * val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	/* Write as many bytes as possible with chunk_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	for (i = 0; i < chunk_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		reg += regmap_get_offset(map, chunk_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		val += chunk_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		val_len -= chunk_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	/* Write remaining bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	if (val_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)  * regmap_raw_write() - Write raw values to one or more registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)  * @map: Register map to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)  * @reg: Initial register to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  * @val: Block of data to be written, laid out for direct transmission to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  *       device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)  * @val_len: Length of data pointed to by val.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)  * This function is intended to be used for things like firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)  * download where a large block of data needs to be transferred to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)  * device.  No formatting will be done on the data provided.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) int regmap_raw_write(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		     const void *val, size_t val_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	if (!regmap_can_raw_write(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	if (val_len % map->format.val_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	ret = _regmap_raw_write(map, reg, val, val_len, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) EXPORT_SYMBOL_GPL(regmap_raw_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)  * regmap_noinc_write(): Write data from a register without incrementing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)  *			register number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)  * @map: Register map to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)  * @reg: Register to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)  * @val: Pointer to data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)  * @val_len: Length of output buffer in bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)  * The regmap API usually assumes that bulk bus write operations will write a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)  * range of registers. Some devices have certain registers for which a write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)  * operation can write to an internal FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)  * The target register must be volatile but registers after it can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)  * completely unrelated cacheable registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)  * This will attempt multiple writes as required to write val_len bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)  * A value of zero will be returned on success, a negative errno will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)  * returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) int regmap_noinc_write(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		      const void *val, size_t val_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	size_t write_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	if (!map->bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	if (!map->bus->write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	if (val_len % map->format.val_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	if (!IS_ALIGNED(reg, map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	if (val_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	while (val_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		if (map->max_raw_write && map->max_raw_write < val_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 			write_len = map->max_raw_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 			write_len = val_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		ret = _regmap_raw_write(map, reg, val, write_len, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		val = ((u8 *)val) + write_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		val_len -= write_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) EXPORT_SYMBOL_GPL(regmap_noinc_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)  * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)  *                                   register field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)  * @field: Register field to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)  * @mask: Bitmask to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)  * @val: Value to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)  * @change: Boolean indicating if a write was done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)  * @async: Boolean indicating asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)  * @force: Boolean indicating use force update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)  * Perform a read/modify/write cycle on the register field with change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)  * async, force option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) int regmap_field_update_bits_base(struct regmap_field *field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 				  unsigned int mask, unsigned int val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 				  bool *change, bool async, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	mask = (mask << field->shift) & field->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	return regmap_update_bits_base(field->regmap, field->reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 				       mask, val << field->shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 				       change, async, force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)  * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)  *                                    register field with port ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)  * @field: Register field to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)  * @id: port ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)  * @mask: Bitmask to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)  * @val: Value to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)  * @change: Boolean indicating if a write was done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)  * @async: Boolean indicating asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)  * @force: Boolean indicating use force update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 				   unsigned int mask, unsigned int val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 				   bool *change, bool async, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	if (id >= field->id_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	mask = (mask << field->shift) & field->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	return regmap_update_bits_base(field->regmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 				       field->reg + (field->id_offset * id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 				       mask, val << field->shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 				       change, async, force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)  * regmap_bulk_write() - Write multiple registers to the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)  * @map: Register map to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)  * @reg: First register to be write from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)  * @val: Block of data to be written, in native register size for device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)  * @val_count: Number of registers to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)  * This function is intended to be used for writing a large block of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)  * data to the device either in single transfer or multiple transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		     size_t val_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	size_t val_bytes = map->format.val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	if (!IS_ALIGNED(reg, map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	 * Some devices don't support bulk write, for them we have a series of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	 * single write operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	if (!map->bus || !map->format.parse_inplace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		for (i = 0; i < val_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			unsigned int ival;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 			switch (val_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 			case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 				ival = *(u8 *)(val + (i * val_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 			case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 				ival = *(u16 *)(val + (i * val_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 			case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 				ival = *(u32 *)(val + (i * val_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 			case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 				ival = *(u64 *)(val + (i * val_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 			ret = _regmap_write(map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 					    reg + regmap_get_offset(map, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 					    ival);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		void *wval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		if (!wval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 			map->format.parse_inplace(wval + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		kfree(wval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) EXPORT_SYMBOL_GPL(regmap_bulk_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)  * _regmap_raw_multi_reg_write()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)  * the (register,newvalue) pairs in regs have not been formatted, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)  * they are all in the same page and have been changed to being page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)  * relative. The page register has been written if that was necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) static int _regmap_raw_multi_reg_write(struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 				       const struct reg_sequence *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 				       size_t num_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	u8 *u8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	size_t val_bytes = map->format.val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	size_t reg_bytes = map->format.reg_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	size_t pad_bytes = map->format.pad_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	size_t len = pair_size * num_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	buf = kzalloc(len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	/* We have to linearise by hand. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	u8 = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	for (i = 0; i < num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		unsigned int reg = regs[i].reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		unsigned int val = regs[i].def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		trace_regmap_hw_write_start(map, reg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		map->format.format_reg(u8, reg, map->reg_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		u8 += reg_bytes + pad_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		map->format.format_val(u8, val, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 		u8 += val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	u8 = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	*u8 |= map->write_flag_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	ret = map->bus->write(map->bus_context, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	for (i = 0; i < num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		int reg = regs[i].reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		trace_regmap_hw_write_done(map, reg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) static unsigned int _regmap_register_page(struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 					  unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 					  struct regmap_range_node *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	unsigned int win_page = (reg - range->range_min) / range->window_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	return win_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) static int _regmap_range_multi_paged_reg_write(struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 					       struct reg_sequence *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 					       size_t num_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	int i, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	struct reg_sequence *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	unsigned int this_page = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	unsigned int page_change = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	 * the set of registers are not neccessarily in order, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	 * since the order of write must be preserved this algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	 * chops the set each time the page changes. This also applies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	 * if there is a delay required at any point in the sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	base = regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	for (i = 0, n = 0; i < num_regs; i++, n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		unsigned int reg = regs[i].reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		struct regmap_range_node *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		range = _regmap_range_lookup(map, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		if (range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 			unsigned int win_page = _regmap_register_page(map, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 								      range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 			if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 				this_page = win_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 			if (win_page != this_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 				this_page = win_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 				page_change = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		/* If we have both a page change and a delay make sure to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		 * write the regs and apply the delay before we change the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		 * page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		if (page_change || regs[i].delay_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 				/* For situations where the first write requires
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 				 * a delay we need to make sure we don't call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 				 * raw_multi_reg_write with n=0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 				 * This can't occur with page breaks as we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 				 * never write on the first iteration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 				if (regs[i].delay_us && i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 					n = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 				ret = _regmap_raw_multi_reg_write(map, base, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 				if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 					return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 				if (regs[i].delay_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 					if (map->can_sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 						fsleep(regs[i].delay_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 					else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 						udelay(regs[i].delay_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 				base += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 				n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 				if (page_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 					ret = _regmap_select_page(map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 								  &base[n].reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 								  range, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 					if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 						return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 					page_change = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	if (n > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		return _regmap_raw_multi_reg_write(map, base, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) static int _regmap_multi_reg_write(struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 				   const struct reg_sequence *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 				   size_t num_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	if (!map->can_multi_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		for (i = 0; i < num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 			ret = _regmap_write(map, regs[i].reg, regs[i].def);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 			if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 			if (regs[i].delay_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 				if (map->can_sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 					fsleep(regs[i].delay_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 					udelay(regs[i].delay_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	if (!map->format.parse_inplace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	if (map->writeable_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		for (i = 0; i < num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 			int reg = regs[i].reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 			if (!map->writeable_reg(map->dev, reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 			if (!IS_ALIGNED(reg, map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	if (!map->cache_bypass) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		for (i = 0; i < num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 			unsigned int val = regs[i].def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			unsigned int reg = regs[i].reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 			ret = regcache_write(map, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 				dev_err(map->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 				"Error in caching of register: %x ret: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 								reg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		if (map->cache_only) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 			map->cache_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	WARN_ON(!map->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	for (i = 0; i < num_regs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		unsigned int reg = regs[i].reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		struct regmap_range_node *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 		/* Coalesce all the writes between a page break or a delay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 		 * in a sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 		range = _regmap_range_lookup(map, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 		if (range || regs[i].delay_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 			size_t len = sizeof(struct reg_sequence)*num_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 			struct reg_sequence *base = kmemdup(regs, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 							   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 			if (!base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 			ret = _regmap_range_multi_paged_reg_write(map, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 								  num_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 			kfree(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	return _regmap_raw_multi_reg_write(map, regs, num_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)  * regmap_multi_reg_write() - Write multiple registers to the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)  * @map: Register map to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)  * @regs: Array of structures containing register,value to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)  * @num_regs: Number of registers to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)  * Write multiple registers to the device where the set of register, value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)  * pairs are supplied in any order, possibly not all in a single range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)  * The 'normal' block write mode will send ultimately send data on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)  * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)  * addressed. However, this alternative block multi write mode will send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)  * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)  * must of course support the mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)  * A value of zero will be returned on success, a negative errno will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)  * returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 			   int num_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	ret = _regmap_multi_reg_write(map, regs, num_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)  * regmap_multi_reg_write_bypassed() - Write multiple registers to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)  *                                     device but not the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)  * @map: Register map to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)  * @regs: Array of structures containing register,value to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)  * @num_regs: Number of registers to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)  * Write multiple registers to the device but not the cache where the set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)  * of register are supplied in any order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)  * This function is intended to be used for writing a large block of data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)  * atomically to the device in single transfer for those I2C client devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)  * that implement this alternative block write mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) int regmap_multi_reg_write_bypassed(struct regmap *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 				    const struct reg_sequence *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 				    int num_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	bool bypass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	bypass = map->cache_bypass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	map->cache_bypass = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	ret = _regmap_multi_reg_write(map, regs, num_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	map->cache_bypass = bypass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)  * regmap_raw_write_async() - Write raw values to one or more registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)  *                            asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)  * @map: Register map to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)  * @reg: Initial register to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)  * @val: Block of data to be written, laid out for direct transmission to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)  *       device.  Must be valid until regmap_async_complete() is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)  * @val_len: Length of data pointed to by val.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)  * This function is intended to be used for things like firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)  * download where a large block of data needs to be transferred to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)  * device.  No formatting will be done on the data provided.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)  * If supported by the underlying bus the write will be scheduled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)  * asynchronously, helping maximise I/O speed on higher speed buses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)  * like SPI.  regmap_async_complete() can be called to ensure that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)  * asynchrnous writes have been completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) int regmap_raw_write_async(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 			   const void *val, size_t val_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	if (val_len % map->format.val_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	if (!IS_ALIGNED(reg, map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	map->async = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	ret = _regmap_raw_write(map, reg, val, val_len, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	map->async = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) EXPORT_SYMBOL_GPL(regmap_raw_write_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 			    unsigned int val_len, bool noinc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	struct regmap_range_node *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	WARN_ON(!map->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	if (!map->bus || !map->bus->read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	range = _regmap_range_lookup(map, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	if (range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		ret = _regmap_select_page(map, &reg, range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 					  noinc ? 1 : val_len / map->format.val_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 		if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 				      map->read_flag_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	ret = map->bus->read(map->bus_context, map->work_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 			     map->format.reg_bytes + map->format.pad_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 			     val, val_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) static int _regmap_bus_reg_read(void *context, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 				unsigned int *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	struct regmap *map = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	return map->bus->reg_read(map->bus_context, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) static int _regmap_bus_read(void *context, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 			    unsigned int *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	struct regmap *map = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	void *work_val = map->work_buf + map->format.reg_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 		map->format.pad_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	if (!map->format.parse_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 		*val = map->format.parse_val(work_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) static int _regmap_read(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 			unsigned int *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	void *context = _regmap_map_get_context(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	if (!map->cache_bypass) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		ret = regcache_read(map, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	if (map->cache_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	if (!regmap_readable(map, reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	ret = map->reg_read(context, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		if (regmap_should_log(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 			dev_info(map->dev, "%x => %x\n", reg, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		trace_regmap_reg_read(map, reg, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		if (!map->cache_bypass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 			regcache_write(map, reg, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)  * regmap_read() - Read a value from a single register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)  * @map: Register map to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)  * @reg: Register to be read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)  * @val: Pointer to store read value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	if (!IS_ALIGNED(reg, map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	ret = _regmap_read(map, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) EXPORT_SYMBOL_GPL(regmap_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)  * regmap_raw_read() - Read raw data from the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)  * @map: Register map to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)  * @reg: First register to be read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)  * @val: Pointer to store read value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)  * @val_len: Size of data to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 		    size_t val_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	size_t val_bytes = map->format.val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	size_t val_count = val_len / val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	unsigned int v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	if (!map->bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	if (val_len % map->format.val_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	if (!IS_ALIGNED(reg, map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	if (val_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	    map->cache_type == REGCACHE_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		size_t chunk_count, chunk_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		size_t chunk_regs = val_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 		if (!map->bus->read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 			ret = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 		if (map->use_single_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 			chunk_regs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 		else if (map->max_raw_read && val_len > map->max_raw_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 			chunk_regs = map->max_raw_read / val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		chunk_count = val_count / chunk_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		chunk_bytes = chunk_regs * val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 		/* Read bytes that fit into whole chunks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 		for (i = 0; i < chunk_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 			if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 			reg += regmap_get_offset(map, chunk_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 			val += chunk_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 			val_len -= chunk_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 		/* Read remaining bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		if (val_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 			ret = _regmap_raw_read(map, reg, val, val_len, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 			if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		/* Otherwise go word by word for the cache; should be low
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 		 * cost as we expect to hit the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 		for (i = 0; i < val_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 					   &v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 			if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 			map->format.format_val(val + (i * val_bytes), v, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) EXPORT_SYMBOL_GPL(regmap_raw_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)  * regmap_noinc_read(): Read data from a register without incrementing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)  *			register number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)  * @map: Register map to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)  * @reg: Register to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)  * @val: Pointer to data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)  * @val_len: Length of output buffer in bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)  * The regmap API usually assumes that bulk bus read operations will read a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)  * range of registers. Some devices have certain registers for which a read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)  * operation read will read from an internal FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830)  * The target register must be volatile but registers after it can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)  * completely unrelated cacheable registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)  * This will attempt multiple reads as required to read val_len bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)  * A value of zero will be returned on success, a negative errno will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)  * returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) int regmap_noinc_read(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		      void *val, size_t val_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	size_t read_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	if (!map->bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	if (!map->bus->read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	if (val_len % map->format.val_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	if (!IS_ALIGNED(reg, map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	if (val_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	while (val_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 		if (map->max_raw_read && map->max_raw_read < val_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 			read_len = map->max_raw_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 			read_len = val_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 		ret = _regmap_raw_read(map, reg, val, read_len, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		val = ((u8 *)val) + read_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 		val_len -= read_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) EXPORT_SYMBOL_GPL(regmap_noinc_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)  * regmap_field_read(): Read a value to a single register field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)  * @field: Register field to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)  * @val: Pointer to store read value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) int regmap_field_read(struct regmap_field *field, unsigned int *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	unsigned int reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	ret = regmap_read(field->regmap, field->reg, &reg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	reg_val &= field->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	reg_val >>= field->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	*val = reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) EXPORT_SYMBOL_GPL(regmap_field_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906)  * regmap_fields_read() - Read a value to a single register field with port ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908)  * @field: Register field to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)  * @id: port ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)  * @val: Pointer to store read value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) int regmap_fields_read(struct regmap_field *field, unsigned int id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		       unsigned int *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	unsigned int reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	if (id >= field->id_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	ret = regmap_read(field->regmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 			  field->reg + (field->id_offset * id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 			  &reg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	reg_val &= field->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	reg_val >>= field->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	*val = reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) EXPORT_SYMBOL_GPL(regmap_fields_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)  * regmap_bulk_read() - Read multiple registers from the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)  * @map: Register map to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)  * @reg: First register to be read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)  * @val: Pointer to store read value, in native register size for device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)  * @val_count: Number of registers to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)  * A value of zero will be returned on success, a negative errno will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)  * be returned in error cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 		     size_t val_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	size_t val_bytes = map->format.val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	bool vol = regmap_volatile_range(map, reg, val_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	if (!IS_ALIGNED(reg, map->reg_stride))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	if (val_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 		if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 			map->format.parse_inplace(val + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 		u64 *u64 = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		u32 *u32 = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 		u16 *u16 = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		u8 *u8 = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 		map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		for (i = 0; i < val_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 			unsigned int ival;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 					   &ival);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 			if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 			switch (map->format.val_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 			case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 				u64[i] = ival;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 			case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 				u32[i] = ival;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 			case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 				u16[i] = ival;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 			case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 				u8[i] = ival;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 		map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) EXPORT_SYMBOL_GPL(regmap_bulk_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) static int _regmap_update_bits(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 			       unsigned int mask, unsigned int val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 			       bool *change, bool force_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	unsigned int tmp, orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	if (change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		*change = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	if (regmap_volatile(map, reg) && map->reg_update_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 		if (ret == 0 && change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 			*change = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 		ret = _regmap_read(map, reg, &orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 		if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 		tmp = orig & ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 		tmp |= val & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 		if (force_write || (tmp != orig)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 			ret = _regmap_write(map, reg, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 			if (ret == 0 && change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 				*change = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)  * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)  * @map: Register map to update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)  * @reg: Register to update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)  * @mask: Bitmask to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053)  * @val: New value for bitmask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054)  * @change: Boolean indicating if a write was done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)  * @async: Boolean indicating asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)  * @force: Boolean indicating use force update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058)  * Perform a read/modify/write cycle on a register map with change, async, force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)  * options.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061)  * If async is true:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)  * With most buses the read must be done synchronously so this is most useful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)  * for devices with a cache which do not need to interact with the hardware to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)  * determine the current register value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)  * Returns zero for success, a negative number on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) int regmap_update_bits_base(struct regmap *map, unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 			    unsigned int mask, unsigned int val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 			    bool *change, bool async, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	map->async = async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	ret = _regmap_update_bits(map, reg, mask, val, change, force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	map->async = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) EXPORT_SYMBOL_GPL(regmap_update_bits_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)  * regmap_test_bits() - Check if all specified bits are set in a register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)  * @map: Register map to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)  * @reg: Register to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)  * @bits: Bits to test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)  * Returns 0 if at least one of the tested bits is not set, 1 if all tested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)  * bits are set and a negative error number if the underlying regmap_read()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)  * fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	unsigned int val, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	ret = regmap_read(map, reg, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	return (val & bits) == bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) EXPORT_SYMBOL_GPL(regmap_test_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) void regmap_async_complete_cb(struct regmap_async *async, int ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	struct regmap *map = async->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	bool wake;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	trace_regmap_async_io_complete(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	spin_lock(&map->async_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	list_move(&async->list, &map->async_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	wake = list_empty(&map->async_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 		map->async_ret = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	spin_unlock(&map->async_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 	if (wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 		wake_up(&map->async_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) static int regmap_async_is_done(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	spin_lock_irqsave(&map->async_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	ret = list_empty(&map->async_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	spin_unlock_irqrestore(&map->async_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)  * regmap_async_complete - Ensure all asynchronous I/O has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)  * @map: Map to operate on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)  * Blocks until any pending asynchronous I/O has completed.  Returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151)  * an error code for any failed I/O operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) int regmap_async_complete(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	/* Nothing to do with no async support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	if (!map->bus || !map->bus->async_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 	trace_regmap_async_complete_start(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	wait_event(map->async_waitq, regmap_async_is_done(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	spin_lock_irqsave(&map->async_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	ret = map->async_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	map->async_ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	spin_unlock_irqrestore(&map->async_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	trace_regmap_async_complete_done(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) EXPORT_SYMBOL_GPL(regmap_async_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)  * regmap_register_patch - Register and apply register updates to be applied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)  *                         on device initialistion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)  * @map: Register map to apply updates to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)  * @regs: Values to update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)  * @num_regs: Number of entries in regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185)  * Register a set of register updates to be applied to the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)  * whenever the device registers are synchronised with the cache and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)  * apply them immediately.  Typically this is used to apply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)  * corrections to be applied to the device defaults on startup, such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)  * as the updates some vendors provide to undocumented registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)  * The caller must ensure that this function cannot be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)  * concurrently with either itself or regcache_sync().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 			  int num_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	struct reg_sequence *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 	bool bypass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 	    num_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 	p = krealloc(map->patch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 		     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 		map->patch = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 		map->patch_regs += num_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	map->lock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	bypass = map->cache_bypass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	map->cache_bypass = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	map->async = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	ret = _regmap_multi_reg_write(map, regs, num_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	map->async = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 	map->cache_bypass = bypass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	map->unlock(map->lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 	regmap_async_complete(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) EXPORT_SYMBOL_GPL(regmap_register_patch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)  * regmap_get_val_bytes() - Report the size of a register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)  * @map: Register map to operate on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)  * Report the size of a register value, mainly intended to for use by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242)  * generic infrastructure built on top of regmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) int regmap_get_val_bytes(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	if (map->format.format_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	return map->format.val_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)  * regmap_get_max_register() - Report the max register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)  * @map: Register map to operate on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)  * Report the max register value, mainly intended to for use by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259)  * generic infrastructure built on top of regmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) int regmap_get_max_register(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	return map->max_register ? map->max_register : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) EXPORT_SYMBOL_GPL(regmap_get_max_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)  * regmap_get_reg_stride() - Report the register address stride
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)  * @map: Register map to operate on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)  * Report the register address stride, mainly intended to for use by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)  * generic infrastructure built on top of regmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) int regmap_get_reg_stride(struct regmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	return map->reg_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) int regmap_parse_val(struct regmap *map, const void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 			unsigned int *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	if (!map->format.parse_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	*val = map->format.parse_val(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) EXPORT_SYMBOL_GPL(regmap_parse_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 
/* One-time boot hook: set up the regmap debugfs infrastructure. */
static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
/* postcore: run early so debugfs entries exist before most drivers probe. */
postcore_initcall(regmap_initcall);