// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - rbtree caching support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

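/*
 * The cache is kept as an rbtree of variable-length blocks.  Each block
 * covers a contiguous, strided range of registers and carries a bitmap
 * recording which of those registers actually hold a cached value.
 */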
struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* which registers are present */
	long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
};

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

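/*
 * Find the block covering @reg.  The most recently used block is tried
 * first, on the assumption that register accesses tend to cluster, then
 * we fall back to a binary search of the tree.  Returns NULL if @reg is
 * not covered by any block.
 */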
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

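/*
 * Link @rbnode into @root at the position given by its base register.
 * Returns 0 without inserting if an existing block already covers that
 * base register, 1 on successful insertion.
 */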
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

#ifdef CONFIG_DEBUG_FS
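/*
 * Dump each block's register range and size, followed by a summary of
 * node count, cached registers and the cache's total memory footprint.
 */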
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = rb_entry(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(rbtree);

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif

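/*
 * Allocate the cache context and seed it with the register defaults
 * supplied in the regmap configuration, if any.
 */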
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof(*rbtree_ctx), GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

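/*
 * Read a cached register value.  Returns -ENOENT if @reg lies outside
 * every block or has no value recorded in the presence bitmap, so the
 * core can fall back to reading the hardware.
 */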
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}

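/*
 * Extend @rbnode so that it spans @base_reg..@top_reg and store @value
 * for @reg inside it.  The data block is reallocated, the presence
 * bitmap is grown when it needs more words, and when the block grows
 * downwards the existing contents are shifted up to their new slots.
 */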
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	rbnode->block = blk;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc(rbnode->cache_present,
				   BITS_TO_LONGS(blklen) * sizeof(*present),
				   GFP_KERNEL);
		if (!present)
			return -ENOMEM;

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/*
	 * If the block grew downwards (the new register is below the old
	 * base) shift the existing data and presence bits up by @offset
	 * slots so they keep their position relative to the new base_reg.
	 */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

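/*
 * Allocate a new block for @reg.  If a readable-range table is
 * available, size the block to cover the whole range containing @reg,
 * on the assumption that neighbouring registers will be cached too;
 * otherwise start with a single-register block.
 */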
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
				      GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
					sizeof(*rbnode->cache_present),
					GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}

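/*
 * Store @value for @reg in the cache.  If @reg falls inside an existing
 * block, update it in place.  Otherwise either extend the nearest block
 * within reach (see the max_dist heuristic below) or allocate a fresh
 * node for the register.
 */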
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/*
	 * If we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;
		unsigned int dist, best_dist = UINT_MAX;

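		/*
		 * Heuristic: only consider extending an existing block
		 * if @reg is within max_dist of it, i.e. roughly the
		 * point at which growing the block would cost more
		 * memory than the overhead of allocating a new node.
		 */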
		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		node = rbtree_ctx->root.rb_node;
		while (node) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				if (reg < base_reg)
					dist = base_reg - reg;
				else if (reg > top_reg)
					dist = reg - top_reg;
				else
					dist = 0;
				if (dist < best_dist) {
					rbnode = rbnode_tmp;
					best_dist = dist;
					new_base_reg = min(reg, base_reg);
					new_top_reg = max(reg, top_reg);
				}
			}

			/*
			 * Keep looking, we want to choose the closest block,
			 * otherwise we might end up creating overlapping
			 * blocks, which breaks the rbtree.
			 */
			if (reg < base_reg)
				node = node->rb_left;
			else if (reg > top_reg)
				node = node->rb_right;
			else
				break;
		}

		if (rbnode) {
			ret = regcache_rbtree_insert_to_block(map, rbnode,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode;
			return 0;
		}

		/*
		 * We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     reg - rbnode->base_reg, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

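/*
 * Write back all cached registers in [@min, @max] that are marked
 * present, block by block, then wait for any asynchronous I/O the
 * writes may have started.
 */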
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}

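/*
 * Forget the cached values for registers in [@min, @max] by clearing
 * their presence bits.  The blocks themselves are kept and can be
 * repopulated by later writes.
 */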
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

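/*
 * Drivers select this implementation through their regmap config rather
 * than by referencing these ops directly, typically along the lines of
 * (illustrative driver names, not from this file):
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits	= 8,
 *		.val_bits	= 8,
 *		.max_register	= FOO_MAX_REG,
 *		.cache_type	= REGCACHE_RBTREE,
 *	};
 */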
struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};