Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - debugfs
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

struct regmap_debugfs_node {
	struct regmap *map;
	struct list_head link;
};

static unsigned int dummy_index;
static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);

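/*
 * Each regmap gets its own directory under the "regmap" directory at the
 * debugfs root (typically /sys/kernel/debug/regmap).  The directory is
 * named after the device, optionally suffixed with the map name (or
 * "dummyN" when no usable name exists), and holds:
 *
 *   name       - the driver name of the backing device
 *   range      - the printable register blocks, one "min-max" line each
 *   registers  - a fixed-width hex dump, one "reg: value" line per register
 *   access     - per-register readable/writeable/volatile/precious flags
 *   cache_only, cache_dirty, cache_bypass - cache controls, created only
 *                when the map has a register cache
 */
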
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
	return snprintf(NULL, 0, "%x", max_val);
}

static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	const char *name = "nodev";
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (map->dev && map->dev->driver)
		name = map->dev->driver->name;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
	if (ret < 0) {
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};

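/*
 * The register dump keeps a list of regmap_debugfs_off_cache entries on
 * map->debugfs_off_cache; each entry describes one contiguous block of
 * printable registers together with the byte range those registers occupy
 * in the fixed-format "registers" file, so later reads can seek without
 * rescanning the whole register space.
 */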
static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
	struct regmap_debugfs_off_cache *c;

	while (!list_empty(&map->debugfs_off_cache)) {
		c = list_first_entry(&map->debugfs_off_cache,
				     struct regmap_debugfs_off_cache,
				     list);
		list_del(&c->list);
		kfree(c);
	}
}

static bool regmap_printable(struct regmap *map, unsigned int reg)
{
	if (regmap_precious(map, reg))
		return false;

	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
		return false;

	return true;
}

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}

static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format */
	if (!map->debugfs_tot_len) {
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;      /* : \n */
	}
}

static int regmap_next_readable_reg(struct regmap *map, int reg)
{
	struct regmap_debugfs_off_cache *c;
	int ret = -EINVAL;

	if (regmap_printable(map, reg + map->reg_stride)) {
		ret = reg + map->reg_stride;
	} else {
		mutex_lock(&map->cache_lock);
		list_for_each_entry(c, &map->debugfs_off_cache, list) {
			if (reg > c->max_reg)
				continue;
			if (reg < c->base_reg) {
				ret = c->base_reg;
				break;
			}
		}
		mutex_unlock(&map->cache_lock);
	}
	return ret;
}

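/*
 * Every register is emitted as a fixed-width record of
 * debugfs_reg_len + 2 + debugfs_val_len + 1 bytes: "<reg>: <value>\n".
 * For example, a map with max_register 0xff and 16-bit values produces
 * 9-byte lines such as "a3: 001f\n" (debugfs_tot_len == 9).  Values that
 * cannot be read are shown as a run of 'X' characters.
 */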
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i >= 0 && i <= to;
	     i = regmap_next_readable_reg(map, i)) {

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;

out:
	kfree(buf);
	return ret;
}

static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous, especially with clients such as PMICs, so we
 * don't provide any real compile-time configuration option for this
 * feature.  People who want to use it need to modify the source code
 * directly.
 */
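/*
 * Expected input is "<reg> <value>", both hexadecimal; for example
 * "echo 30 1f > registers" writes 0x1f to register 0x30 and taints the
 * kernel with TAINT_USER.
 */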
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif

static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};

static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};

static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/* While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/* Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};

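/*
 * The "access" file prints one line per register with four y/n columns:
 * readable, writeable, volatile and precious, in that order.
 */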
static int regmap_access_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	int i, reg_len;

	reg_len = regmap_calc_reg_len(map->max_register);

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* Format the register */
		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
			   regmap_readable(map, i) ? 'y' : 'n',
			   regmap_writeable(map, i) ? 'y' : 'n',
			   regmap_volatile(map, i) ? 'y' : 'n',
			   regmap_precious(map, i) ? 'y' : 'n');
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(regmap_access);

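/*
 * Writing Y to "cache_only" forces cache-only mode, where register I/O is
 * served from the cache and the hardware is not touched (this taints the
 * kernel); writing N clears it and triggers a regcache_sync() to write any
 * cached changes back to the device.
 */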
static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	bool new_val, require_sync = false;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}
	map->cache_only = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return count;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};

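/*
 * Writing Y to "cache_bypass" makes reads and writes go straight to the
 * hardware without consulting or updating the register cache (this taints
 * the kernel); writing N restores normal cached operation.
 */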
static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	bool new_val;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}
	map->cache_bypass = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	return count;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};

void regmap_debugfs_init(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";
	const char *name = map->name;

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected with
	 * a mutex or a spinlock, but if the regmap owner decided to disable
	 * all locking mechanisms, this is no longer the case. For safety:
	 * don't create the debugfs entries if locking is disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	if (name) {
		if (!map->debugfs_name) {
			map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
					      devname, name);
			if (!map->debugfs_name)
				return;
		}
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	if (!strcmp(name, "dummy")) {
		kfree(map->debugfs_name);
		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
						dummy_index);
		if (!map->debugfs_name)
			return;
		name = map->debugfs_name;
		dummy_index++;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}

void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
		map->debugfs_name = NULL;
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}

void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}