Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)    lru_cache.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)    Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)    Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)    Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/string.h> /* for memset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/seq_file.h> /* for seq_printf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/lru_cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 	      "Lars Ellenberg <lars@linbit.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) MODULE_DESCRIPTION("lru_cache - Track sets of hot objects");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) /* this is developers aid only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * it catches concurrent access (lack of locking on the users part) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #define PARANOIA_ENTRY() do {		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	BUG_ON(!lc);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	BUG_ON(!lc->nr_elements);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #define RETURN(x...)     do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	clear_bit_unlock(__LC_PARANOIA, &lc->flags); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	return x ; } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) /* BUG() if e is not one of the elements tracked by lc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #define PARANOIA_LC_ELEMENT(lc, e) do {	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	struct lru_cache *lc_ = (lc);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	struct lc_element *e_ = (e);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	unsigned i = e_->lc_index;	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	BUG_ON(i >= lc_->nr_elements);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	BUG_ON(lc_->lc_element[i] != e_); } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) /* We need to atomically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48)  *  - try to grab the lock (set LC_LOCKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49)  *  - only if there is no pending transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50)  *    (neither LC_DIRTY nor LC_STARVING is set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51)  * Because of PARANOIA_ENTRY() above abusing lc->flags as well,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52)  * it is not sufficient to just say
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53)  *	return 0 == cmpxchg(&lc->flags, 0, LC_LOCKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) int lc_try_lock(struct lru_cache *lc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 		val = cmpxchg(&lc->flags, 0, LC_LOCKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	} while (unlikely (val == LC_PARANOIA));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	/* Spin until no-one is inside a PARANOIA_ENTRY()/RETURN() section. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	return 0 == val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	/* Alternative approach, spin in case someone enters or leaves a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	 * PARANOIA_ENTRY()/RETURN() section. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	unsigned long old, new, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 		old = lc->flags & LC_PARANOIA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 		new = old | LC_LOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 		val = cmpxchg(&lc->flags, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	} while (unlikely (val == (old ^ LC_PARANOIA)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	return old == val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77)  * lc_create - prepares to track objects in an active set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78)  * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79)  * @max_pending_changes: maximum changes to accumulate until a transaction is required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80)  * @e_count: number of elements allowed to be active simultaneously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81)  * @e_size: size of the tracked objects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82)  * @e_off: offset to the &struct lc_element member in a tracked object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84)  * Returns a pointer to a newly initialized struct lru_cache on success,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85)  * or NULL on (allocation) failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 		unsigned max_pending_changes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 		unsigned e_count, size_t e_size, size_t e_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	struct hlist_head *slot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	struct lc_element **element = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	struct lru_cache *lc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	struct lc_element *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	unsigned cache_obj_size = kmem_cache_size(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	WARN_ON(cache_obj_size < e_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	if (cache_obj_size < e_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	/* e_count too big; would probably fail the allocation below anyways.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	 * for typical use cases, e_count should be few thousand at most. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	if (e_count > LC_MAX_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	if (!slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	if (!element)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 		goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	if (!lc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 		goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	INIT_LIST_HEAD(&lc->in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	INIT_LIST_HEAD(&lc->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	INIT_LIST_HEAD(&lc->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	INIT_LIST_HEAD(&lc->to_be_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	lc->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	lc->element_size = e_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	lc->element_off = e_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	lc->nr_elements = e_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	lc->max_pending_changes = max_pending_changes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	lc->lc_cache = cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	lc->lc_element = element;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	lc->lc_slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	/* preallocate all objects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	for (i = 0; i < e_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		void *p = kmem_cache_alloc(cache, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 		memset(p, 0, lc->element_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 		e = p + e_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 		e->lc_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 		e->lc_number = LC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 		e->lc_new_number = LC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 		list_add(&e->list, &lc->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 		element[i] = e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	if (i == e_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		return lc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	/* else: could not allocate all elements, give up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	for (i--; i; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 		void *p = element[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 		kmem_cache_free(cache, p - e_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	kfree(lc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) out_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	kfree(element);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	kfree(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static void lc_free_by_index(struct lru_cache *lc, unsigned i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	void *p = lc->lc_element[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	WARN_ON(!p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 		p -= lc->element_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 		kmem_cache_free(lc->lc_cache, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  * lc_destroy - frees memory allocated by lc_create()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  * @lc: the lru cache to destroy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) void lc_destroy(struct lru_cache *lc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	if (!lc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	for (i = 0; i < lc->nr_elements; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		lc_free_by_index(lc, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	kfree(lc->lc_element);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	kfree(lc->lc_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	kfree(lc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)  * lc_reset - does a full reset for @lc and the hash table slots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)  * @lc: the lru cache to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)  * It is roughly the equivalent of re-allocating a fresh lru_cache object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)  * basically a short cut to lc_destroy(lc); lc = lc_create(...);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) void lc_reset(struct lru_cache *lc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	INIT_LIST_HEAD(&lc->in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	INIT_LIST_HEAD(&lc->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	INIT_LIST_HEAD(&lc->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	INIT_LIST_HEAD(&lc->to_be_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	lc->used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	lc->hits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	lc->misses = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	lc->starving = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	lc->locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	lc->changed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	lc->pending_changes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	lc->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	for (i = 0; i < lc->nr_elements; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 		struct lc_element *e = lc->lc_element[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		void *p = e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 		p -= lc->element_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 		memset(p, 0, lc->element_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 		/* re-init it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 		e->lc_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 		e->lc_number = LC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 		e->lc_new_number = LC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 		list_add(&e->list, &lc->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)  * lc_seq_printf_stats - print stats about @lc into @seq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)  * @seq: the seq_file to print into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)  * @lc: the lru cache to print statistics of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	/* NOTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	 * total calls to lc_get are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	 * (starving + hits + misses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	 * misses include "locked" count (update from an other thread in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	 * progress) and "changed", when this in fact lead to an successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	 * update of the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 		   lc->name, lc->used, lc->nr_elements,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 		   lc->hits, lc->misses, lc->starving, lc->locked, lc->changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	return  lc->lc_slot + (enr % lc->nr_elements);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 		bool include_changing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	struct lc_element *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	BUG_ON(!lc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	BUG_ON(!lc->nr_elements);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 		/* "about to be changed" elements, pending transaction commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 		 * are hashed by their "new number". "Normal" elements have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 		 * lc_number == lc_new_number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 		if (e->lc_new_number != enr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		if (e->lc_new_number == e->lc_number || include_changing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 			return e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)  * lc_find - find element by label, if present in the hash table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)  * @lc: The lru_cache object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)  * @enr: element number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)  * Returns the pointer to an element, if the element with the requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)  * "label" or element number is present in the hash table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)  * or NULL if not found. Does not change the refcnt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)  * Ignores elements that are "about to be used", i.e. not yet in the active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)  * set, but still pending transaction commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	return __lc_find(lc, enr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)  * lc_is_used - find element by label
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)  * @lc: The lru_cache object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)  * @enr: element number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)  * Returns true, if the element with the requested "label" or element number is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)  * present in the hash table, and is used (refcnt > 0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)  * Also finds elements that are not _currently_ used but only "about to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)  * used", i.e. on the "to_be_changed" list, pending transaction commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) bool lc_is_used(struct lru_cache *lc, unsigned int enr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	struct lc_element *e = __lc_find(lc, enr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	return e && e->refcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)  * lc_del - removes an element from the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)  * @lc: The lru_cache object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)  * @e: The element to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)  * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)  * sets @e->enr to %LC_FREE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) void lc_del(struct lru_cache *lc, struct lc_element *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	PARANOIA_ENTRY();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	PARANOIA_LC_ELEMENT(lc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	BUG_ON(e->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	e->lc_number = e->lc_new_number = LC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	hlist_del_init(&e->colision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 	list_move(&e->list, &lc->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	RETURN();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) static struct lc_element *lc_prepare_for_change(struct lru_cache *lc, unsigned new_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	struct list_head *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	struct lc_element *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	if (!list_empty(&lc->free))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 		n = lc->free.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	else if (!list_empty(&lc->lru))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 		n = lc->lru.prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	e = list_entry(n, struct lc_element, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	PARANOIA_LC_ELEMENT(lc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 	e->lc_new_number = new_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	if (!hlist_unhashed(&e->colision))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 		__hlist_del(&e->colision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	hlist_add_head(&e->colision, lc_hash_slot(lc, new_number));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	list_move(&e->list, &lc->to_be_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	return e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) static int lc_unused_element_available(struct lru_cache *lc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	if (!list_empty(&lc->free))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 		return 1; /* something on the free list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	if (!list_empty(&lc->lru))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 		return 1;  /* something to evict */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) /* used as internal flags to __lc_get */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	LC_GET_MAY_CHANGE = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	LC_GET_MAY_USE_UNCOMMITTED = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	struct lc_element *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	PARANOIA_ENTRY();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	if (lc->flags & LC_STARVING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 		++lc->starving;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 		RETURN(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	e = __lc_find(lc, enr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	/* if lc_new_number != lc_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	 * this enr is currently being pulled in already,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	 * and will be available once the pending transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	 * has been committed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		if (e->lc_new_number != e->lc_number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 			/* It has been found above, but on the "to_be_changed"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 			 * list, not yet committed.  Don't pull it in twice,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 			 * wait for the transaction, then try again...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 			if (!(flags & LC_GET_MAY_USE_UNCOMMITTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 				RETURN(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 			/* ... unless the caller is aware of the implications,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 			 * probably preparing a cumulative transaction. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 			++e->refcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 			++lc->hits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 			RETURN(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 		/* else: lc_new_number == lc_number; a real hit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 		++lc->hits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 		if (e->refcnt++ == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 			lc->used++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 		list_move(&e->list, &lc->in_use); /* Not evictable... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 		RETURN(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	/* e == NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	++lc->misses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	if (!(flags & LC_GET_MAY_CHANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 		RETURN(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	/* To avoid races with lc_try_lock(), first, mark us dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	 * (using test_and_set_bit, as it implies memory barriers), ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	test_and_set_bit(__LC_DIRTY, &lc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	/* ... only then check if it is locked anyways. If lc_unlock clears
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 	 * the dirty bit again, that's not a problem, we will come here again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	if (test_bit(__LC_LOCKED, &lc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 		++lc->locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 		RETURN(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	/* In case there is nothing available and we can not kick out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 	 * the LRU element, we have to wait ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	if (!lc_unused_element_available(lc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 		__set_bit(__LC_STARVING, &lc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 		RETURN(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	/* It was not present in the active set.  We are going to recycle an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	 * unused (or even "free") element, but we won't accumulate more than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	 * max_pending_changes changes.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 	if (lc->pending_changes >= lc->max_pending_changes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 		RETURN(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	e = lc_prepare_for_change(lc, enr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	BUG_ON(!e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	clear_bit(__LC_STARVING, &lc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 	BUG_ON(++e->refcnt != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	lc->used++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	lc->pending_changes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	RETURN(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)  * lc_get - get element by label, maybe change the active set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)  * @lc: the lru cache to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)  * @enr: the label to look up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)  * Finds an element in the cache, increases its usage count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)  * "touches" and returns it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)  * In case the requested number is not present, it needs to be added to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)  * cache. Therefore it is possible that an other element becomes evicted from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)  * the cache. In either case, the user is notified so he is able to e.g. keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)  * a persistent log of the cache changes, and therefore the objects in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)  * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)  *  NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)  *     The cache was marked %LC_STARVING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)  *     or the requested label was not in the active set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)  *     and a changing transaction is still pending (@lc was marked %LC_DIRTY).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)  *     Or no unused or free element could be recycled (@lc will be marked as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)  *     %LC_STARVING, blocking further lc_get() operations).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)  *  pointer to the element with the REQUESTED element number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)  *     In this case, it can be used right away
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)  *  pointer to an UNUSED element with some different element number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)  *          where that different number may also be %LC_FREE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)  *          In this case, the cache is marked %LC_DIRTY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)  *          so lc_try_lock() will no longer succeed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)  *          The returned element pointer is moved to the "to_be_changed" list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)  *          and registered with the new element number on the hash collision chains,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)  *          so it is possible to pick it up from lc_is_used().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)  *          Up to "max_pending_changes" (see lc_create()) can be accumulated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)  *          The user now should do whatever housekeeping is necessary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)  *          typically serialize on lc_try_lock_for_transaction(), then call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)  *          lc_committed(lc) and lc_unlock(), to finish the change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)  * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)  *       any cache set change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)  * lc_get_cumulative - like lc_get; also finds to-be-changed elements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)  * @lc: the lru cache to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)  * @enr: the label to look up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)  * Unlike lc_get this also returns the element for @enr, if it is belonging to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)  * a pending transaction, so the return values are like for lc_get(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)  * plus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)  * pointer to an element already on the "to_be_changed" list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)  * 	In this case, the cache was already marked %LC_DIRTY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)  * Caller needs to make sure that the pending transaction is completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)  * before proceeding to actually use this element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)  * lc_try_get - get element by label, if present; do not change the active set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)  * @lc: the lru cache to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)  * @enr: the label to look up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)  * Finds an element in the cache, increases its usage count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)  * "touches" and returns it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)  * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)  *  NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)  *     The cache was marked %LC_STARVING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)  *     or the requested label was not in the active set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)  *  pointer to the element with the REQUESTED element number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)  *     In this case, it can be used right away
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	return __lc_get(lc, enr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)  * lc_committed - tell @lc that pending changes have been recorded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)  * @lc: the lru cache to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)  * User is expected to serialize on explicit lc_try_lock_for_transaction()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)  * before the transaction is started, and later needs to lc_unlock() explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)  * as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) void lc_committed(struct lru_cache *lc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	struct lc_element *e, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	PARANOIA_ENTRY();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	list_for_each_entry_safe(e, tmp, &lc->to_be_changed, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		/* count number of changes, not number of transactions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 		++lc->changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 		e->lc_number = e->lc_new_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 		list_move(&e->list, &lc->in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	lc->pending_changes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	RETURN();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)  * lc_put - give up refcnt of @e
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)  * @lc: the lru cache to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)  * @e: the element to put
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)  * If refcnt reaches zero, the element is moved to the lru list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)  * and a %LC_STARVING (if set) is cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)  * Returns the new (post-decrement) refcnt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	PARANOIA_ENTRY();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	PARANOIA_LC_ELEMENT(lc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	BUG_ON(e->refcnt == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	BUG_ON(e->lc_number != e->lc_new_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	if (--e->refcnt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		/* move it to the front of LRU. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 		list_move(&e->list, &lc->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		lc->used--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 		clear_bit_unlock(__LC_STARVING, &lc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	RETURN(e->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)  * lc_element_by_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)  * @lc: the lru cache to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)  * @i: the index of the element to return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	BUG_ON(i >= lc->nr_elements);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	BUG_ON(lc->lc_element[i] == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	BUG_ON(lc->lc_element[i]->lc_index != i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	return lc->lc_element[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)  * lc_index_of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)  * @lc: the lru cache to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)  * @e: the element to query for its index position in lc->element
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	PARANOIA_LC_ELEMENT(lc, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	return e->lc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)  * lc_set - associate index with label
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)  * @lc: the lru cache to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)  * @enr: the label to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)  * @index: the element index to associate label with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)  * Used to initialize the active set to some previously recorded state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) void lc_set(struct lru_cache *lc, unsigned int enr, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	struct lc_element *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	struct list_head *lh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	if (index < 0 || index >= lc->nr_elements)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	e = lc_element_by_index(lc, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	BUG_ON(e->lc_number != e->lc_new_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	BUG_ON(e->refcnt != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	e->lc_number = e->lc_new_number = enr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	hlist_del_init(&e->colision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	if (enr == LC_FREE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		lh = &lc->free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 		hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 		lh = &lc->lru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	list_move(&e->list, lh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)  * lc_dump - Dump a complete LRU cache to seq in textual form.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)  * @lc: the lru cache to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)  * @seq: the &struct seq_file pointer to seq_printf into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)  * @utext: user supplied additional "heading" or other info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)  * @detail: function pointer the user may provide to dump further details
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)  * of the object the lc_element is embedded in. May be NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)  * Note: a leading space ' ' and trailing newline '\n' is implied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	     void (*detail) (struct seq_file *, struct lc_element *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	unsigned int nr_elements = lc->nr_elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	struct lc_element *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	seq_printf(seq, "\tnn: lc_number (new nr) refcnt %s\n ", utext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	for (i = 0; i < nr_elements; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 		e = lc_element_by_index(lc, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 		if (e->lc_number != e->lc_new_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 			seq_printf(seq, "\t%5d: %6d %8d %6d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 				i, e->lc_number, e->lc_new_number, e->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 			seq_printf(seq, "\t%5d: %6d %-8s %6d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 				i, e->lc_number, "-\"-", e->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 		if (detail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 			detail(seq, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 		seq_putc(seq, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) EXPORT_SYMBOL(lc_create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) EXPORT_SYMBOL(lc_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) EXPORT_SYMBOL(lc_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) EXPORT_SYMBOL(lc_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) EXPORT_SYMBOL(lc_del);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) EXPORT_SYMBOL(lc_try_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) EXPORT_SYMBOL(lc_find);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) EXPORT_SYMBOL(lc_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) EXPORT_SYMBOL(lc_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) EXPORT_SYMBOL(lc_committed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) EXPORT_SYMBOL(lc_element_by_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) EXPORT_SYMBOL(lc_index_of);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) EXPORT_SYMBOL(lc_seq_printf_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) EXPORT_SYMBOL(lc_seq_dump_details);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) EXPORT_SYMBOL(lc_try_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) EXPORT_SYMBOL(lc_is_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) EXPORT_SYMBOL(lc_get_cumulative);