// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_mru_cache.h"

/*
 * The MRU Cache data structure consists of a data store, an array of lists and
 * a lock to protect its internal state. At initialisation time, the client
 * supplies an element lifetime in milliseconds and a group count, as well as a
 * function pointer to call when deleting elements. A data structure for
 * queueing up work in the form of timed callbacks is also included.
 *
 * The group count controls how many lists are created, and thereby how finely
 * the elements are grouped in time. When reaping occurs, all the elements in
 * all the lists whose time has expired are deleted.
 *
 * To give an example of how this works in practice, consider a client that
 * initialises an MRU Cache with a lifetime of ten seconds and a group count of
 * five. Five internal lists will be created, each representing a two second
 * period in time. When the first element is added, time zero for the data
 * structure is initialised to the current time.
 *
 * All the elements added in the first two seconds are appended to the first
 * list. Elements added in the third second go into the second list, and so on.
 * If an element is accessed at any point, it is removed from its list and
 * inserted at the head of the current most-recently-used list.
 *
 * The reaper function will have nothing to do until at least twelve seconds
 * have elapsed since the first element was added. The reason for this is that
 * if it were called at t=11s, there could be elements in the first list that
 * have only been inactive for nine seconds, so it still does nothing. If it is
 * called anywhere between t=12 and t=14 seconds, it will delete all the
 * elements that remain in the first list. It's therefore possible for elements
 * to remain in the data store even after they've been inactive for up to
 * (t + t/g) seconds, where t is the inactive element lifetime and g is the
 * number of groups.
 *
 * The above example assumes that the reaper function gets called at least once
 * every (t/g) seconds. If it is called less frequently, unused elements will
 * accumulate in the reap list until the reaper function is eventually called.
 * The current implementation uses work queue callbacks to carefully time the
 * reaper function calls, so this should happen rarely, if at all.
 *
 * From a design perspective, the primary reason for the choice of a list array
 * representing discrete time intervals is that it's only practical to reap
 * expired elements in groups of some appreciable size. This automatically
 * introduces a granularity to element lifetimes, so there's no point storing an
 * individual timeout with each element that specifies a more precise reap time.
 * The bonus is a saving of sizeof(long) bytes of memory per element stored.
 *
 * The elements could have been stored in just one list, but an array of
 * counters or pointers would need to be maintained to allow them to be divided
 * up into discrete time groups. More critically, the process of touching or
 * removing an element would involve walking large portions of the entire list,
 * which would have a detrimental effect on performance. The additional memory
 * requirement for the array of list heads is minimal.
 *
 * When an element is touched or deleted, it needs to be removed from its
 * current list. Doubly linked lists are used to make the list maintenance
 * portion of these operations O(1). Since reaper timing can be imprecise,
 * inserts and lookups can occur when there are no free lists available. When
 * this happens, all the elements on the LRU list need to be migrated to the end
 * of the reap list. To keep the list maintenance portion of these operations
 * O(1) also, list tails need to be accessible without walking the entire list.
 * This is the reason why doubly linked list heads are used.
 */
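
/*
 * A small worked instance of the bound above, using the hypothetical ten
 * second / five group configuration from the example (the numbers are
 * illustrative only; real values come from the client's xfs_mru_cache_create()
 * call):
 *
 *	t   = 10s	(inactive element lifetime)
 *	g   = 5		(group count)
 *	t/g = 2s	(time span covered by each list)
 *
 *	worst-case residency = t + t/g = 10s + 2s = 12s
 *
 * That is, provided the reaper runs at least once every t/g = 2 seconds, an
 * element that is never touched again is freed after at most 12 seconds of
 * inactivity.
 */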

/*
 * An MRU Cache is a dynamic data structure that stores its elements in a way
 * that allows efficient lookups, but also groups them into discrete time
 * intervals based on insertion time. This allows elements to be efficiently
 * and automatically reaped after a fixed period of inactivity.
 *
 * When a client data pointer is stored in the MRU Cache it needs to be added to
 * both the data store and to one of the lists. It must also be possible to
 * access each of these entries via the other, i.e. to:
 *
 * a) Walk a list, removing the corresponding data store entry for each item.
 * b) Look up a data store entry, then access its list entry directly.
 *
 * To achieve both of these goals, each entry must contain both a list entry and
 * a key, in addition to the user's data pointer. Note that it's not a good
 * idea to have the client embed one of these structures at the top of their own
 * data structure, because inserting the same item more than once would most
 * likely result in a loop in one of the lists. That's a sure-fire recipe for
 * an infinite loop in the code.
 */
struct xfs_mru_cache {
	struct radix_tree_root store;	/* Core storage data structure. */
	struct list_head *lists;	/* Array of lists, one per grp. */
	struct list_head reap_list;	/* Elements overdue for reaping. */
	spinlock_t lock;		/* Lock to protect this struct. */
	unsigned int grp_count;		/* Number of discrete groups. */
	unsigned int grp_time;		/* Time period spanned by grps. */
	unsigned int lru_grp;		/* Group containing time zero. */
	unsigned long time_zero;	/* Time first element was added. */
	xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
	struct delayed_work work;	/* Workqueue data for reaping. */
	unsigned int queued;		/* work has been queued */
	void *data;
};
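
/*
 * For reference, struct xfs_mru_cache_elem (defined in xfs_mru_cache.h) holds
 * only the list linkage and the key. A client would typically wrap it in its
 * own structure and recover that structure with container_of() in its free
 * function. A minimal sketch, in which struct foo_item, foo and foo_put() are
 * hypothetical client names (container_of(), kfree() and the free function
 * signature are real):
 *
 *	struct foo_item {
 *		struct xfs_mru_cache_elem mru;
 *		struct foo *foo;
 *	};
 *
 *	static void
 *	foo_free_func(void *data, struct xfs_mru_cache_elem *elem)
 *	{
 *		struct foo_item *item =
 *			container_of(elem, struct foo_item, mru);
 *
 *		foo_put(item->foo);
 *		kfree(item);
 *	}
 *
 * In keeping with the warning above, a given element must only be present in
 * the cache once at any time.
 */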

static struct workqueue_struct *xfs_mru_reap_wq;

/*
 * When inserting, destroying or reaping, it's first necessary to update the
 * lists relative to a particular time. In the case of destroying, that time
 * will be well in the future to ensure that all items are moved to the reap
 * list. In all other cases though, the time will be the current time.
 *
 * This function enters a loop, moving the contents of the LRU list to the reap
 * list again and again until either a) the lists are all empty, or b) time zero
 * has been advanced sufficiently to be within the immediate element lifetime.
 *
 * Case a) above is detected by counting how many groups are migrated and
 * stopping when they've all been moved. Case b) is detected by monitoring the
 * time_zero field, which is updated as each group is migrated.
 *
 * The return value is the earliest time that more migration could be needed, or
 * zero if there's no need to schedule more work because the lists are empty.
 */
STATIC unsigned long
_xfs_mru_cache_migrate(
	struct xfs_mru_cache *mru,
	unsigned long now)
{
	unsigned int grp;
	unsigned int migrated = 0;
	struct list_head *lru_list;

	/* Nothing to do if the data store is empty. */
	if (!mru->time_zero)
		return 0;

	/* While time zero is older than the time spanned by all the lists. */
	while (mru->time_zero <= now - mru->grp_count * mru->grp_time) {

		/*
		 * If the LRU list isn't empty, migrate its elements to the tail
		 * of the reap list.
		 */
		lru_list = mru->lists + mru->lru_grp;
		if (!list_empty(lru_list))
			list_splice_init(lru_list, mru->reap_list.prev);

		/*
		 * Advance the LRU group number, freeing the old LRU list to
		 * become the new MRU list; advance time zero accordingly.
		 */
		mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count;
		mru->time_zero += mru->grp_time;

		/*
		 * If reaping is so far behind that all the elements on all the
		 * lists have been migrated to the reap list, it's now empty.
		 */
		if (++migrated == mru->grp_count) {
			mru->lru_grp = 0;
			mru->time_zero = 0;
			return 0;
		}
	}

	/* Find the first non-empty list from the LRU end. */
	for (grp = 0; grp < mru->grp_count; grp++) {

		/* Check the grp'th list from the LRU end. */
		lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count);
		if (!list_empty(lru_list))
			return mru->time_zero +
				(mru->grp_count + grp) * mru->grp_time;
	}

	/* All the lists must be empty. */
	mru->lru_grp = 0;
	mru->time_zero = 0;
	return 0;
}
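
/*
 * A worked instance of the return value above, with illustrative numbers (not
 * taken from a real workload): let grp_count = 6 and write G for grp_time. If
 * the oldest non-empty list sits grp = 2 steps from the LRU end, its elements
 * were added no earlier than time_zero + 2 * G, so their group cannot fall
 * outside the grp_count * G span covered by the lists until
 *
 *	time_zero + (grp_count + grp) * G = time_zero + 8 * G
 *
 * which is exactly the value returned, letting the caller schedule the next
 * reap for that time instead of polling sooner.
 */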

/*
 * When inserting or doing a lookup, an element needs to be inserted into the
 * MRU list. The lists must be migrated first to ensure that they're
 * up-to-date, otherwise the new element could be given a shorter lifetime in
 * the cache than it should.
 */
STATIC void
_xfs_mru_cache_list_insert(
	struct xfs_mru_cache *mru,
	struct xfs_mru_cache_elem *elem)
{
	unsigned int grp = 0;
	unsigned long now = jiffies;

	/*
	 * If the data store is empty, initialise time zero, leave grp set to
	 * zero and start the work queue timer if necessary. Otherwise, set grp
	 * to the number of group times that have elapsed since time zero.
	 */
	if (!_xfs_mru_cache_migrate(mru, now)) {
		mru->time_zero = now;
		if (!mru->queued) {
			mru->queued = 1;
			queue_delayed_work(xfs_mru_reap_wq, &mru->work,
					   mru->grp_count * mru->grp_time);
		}
	} else {
		grp = (now - mru->time_zero) / mru->grp_time;
		grp = (mru->lru_grp + grp) % mru->grp_count;
	}

	/* Insert the element at the tail of the corresponding list. */
	list_add_tail(&elem->list_node, mru->lists + grp);
}
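
/*
 * A worked instance of the group selection above (the numbers are illustrative
 * only): with grp_count = 6, lru_grp = 4 and an element inserted five seconds
 * after time_zero while grp_time covers two seconds,
 *
 *	grp = (now - time_zero) / grp_time = 5s / 2s = 2
 *	grp = (lru_grp + grp) % grp_count  = (4 + 2) % 6 = 0
 *
 * so the element lands on list 0, which in this state is the third list from
 * the LRU end and hence the list covering that two second window.
 */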

/*
 * When destroying or reaping, all the elements that were migrated to the reap
 * list need to be deleted. For each element this involves removing it from the
 * data store, removing it from the reap list, calling the client's free
 * function and deleting the element from the element zone.
 *
 * We get called holding the mru->lock, which we drop and then reacquire.
 * Sparse needs special help with this to tell it we know what we are doing.
 */
STATIC void
_xfs_mru_cache_clear_reap_list(
	struct xfs_mru_cache *mru)
		__releases(mru->lock) __acquires(mru->lock)
{
	struct xfs_mru_cache_elem *elem, *next;
	struct list_head tmp;

	INIT_LIST_HEAD(&tmp);
	list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) {

		/* Remove the element from the data store. */
		radix_tree_delete(&mru->store, elem->key);

		/*
		 * Move the element to a temporary list so it can be freed
		 * without needing to hold the lock.
		 */
		list_move(&elem->list_node, &tmp);
	}
	spin_unlock(&mru->lock);

	list_for_each_entry_safe(elem, next, &tmp, list_node) {
		list_del_init(&elem->list_node);
		mru->free_func(mru->data, elem);
	}

	spin_lock(&mru->lock);
}

/*
 * We fire the reap timer every group expiry interval so we always have a
 * reaper ready to run. This makes shutdown and flushing of the reaper easy to
 * do. Hence we need to keep track of when the next reap must occur so that we
 * can determine at each interval whether there is anything we need to do.
 */
STATIC void
_xfs_mru_cache_reap(
	struct work_struct *work)
{
	struct xfs_mru_cache *mru =
		container_of(work, struct xfs_mru_cache, work.work);
	unsigned long now, next;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return;

	spin_lock(&mru->lock);
	next = _xfs_mru_cache_migrate(mru, jiffies);
	_xfs_mru_cache_clear_reap_list(mru);

	mru->queued = next;
	if (mru->queued > 0) {
		now = jiffies;
		if (next <= now)
			next = 0;
		else
			next -= now;
		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
	}

	spin_unlock(&mru->lock);
}

int
xfs_mru_cache_init(void)
{
	xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1);
	if (!xfs_mru_reap_wq)
		return -ENOMEM;
	return 0;
}

void
xfs_mru_cache_uninit(void)
{
	destroy_workqueue(xfs_mru_reap_wq);
}

/*
 * To initialise a struct xfs_mru_cache pointer, call xfs_mru_cache_create()
 * with the address of the pointer, a lifetime value in milliseconds, a group
 * count and a free function to use when deleting elements. This function
 * returns 0 if the initialisation was successful.
 */
int
xfs_mru_cache_create(
	struct xfs_mru_cache **mrup,
	void *data,
	unsigned int lifetime_ms,
	unsigned int grp_count,
	xfs_mru_cache_free_func_t free_func)
{
	struct xfs_mru_cache *mru = NULL;
	int err = 0, grp;
	unsigned int grp_time;

	if (mrup)
		*mrup = NULL;

	if (!mrup || !grp_count || !lifetime_ms || !free_func)
		return -EINVAL;

	if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count))
		return -EINVAL;

	if (!(mru = kmem_zalloc(sizeof(*mru), 0)))
		return -ENOMEM;

	/* An extra list is needed to avoid reaping up to a grp_time early. */
	mru->grp_count = grp_count + 1;
	mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), 0);

	if (!mru->lists) {
		err = -ENOMEM;
		goto exit;
	}

	for (grp = 0; grp < mru->grp_count; grp++)
		INIT_LIST_HEAD(mru->lists + grp);

	/*
	 * We use GFP_NOFS radix tree preload and do inserts under a
	 * spinlock so GFP_ATOMIC is appropriate for the radix tree itself.
	 */
	INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
	INIT_LIST_HEAD(&mru->reap_list);
	spin_lock_init(&mru->lock);
	INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);

	mru->grp_time = grp_time;
	mru->free_func = free_func;
	mru->data = data;
	*mrup = mru;

exit:
	if (err && mru && mru->lists)
		kmem_free(mru->lists);
	if (err && mru)
		kmem_free(mru);

	return err;
}

/*
 * Call xfs_mru_cache_flush() to flush out all cached entries, calling their
 * free functions as they're deleted. When this function returns, the caller is
 * guaranteed that all the free functions for all the elements have finished
 * executing and the reaper is not running.
 */
static void
xfs_mru_cache_flush(
	struct xfs_mru_cache *mru)
{
	if (!mru || !mru->lists)
		return;

	spin_lock(&mru->lock);
	if (mru->queued) {
		spin_unlock(&mru->lock);
		cancel_delayed_work_sync(&mru->work);
		spin_lock(&mru->lock);
	}

	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
	_xfs_mru_cache_clear_reap_list(mru);

	spin_unlock(&mru->lock);
}

void
xfs_mru_cache_destroy(
	struct xfs_mru_cache *mru)
{
	if (!mru || !mru->lists)
		return;

	xfs_mru_cache_flush(mru);

	kmem_free(mru->lists);
	kmem_free(mru);
}
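
/*
 * A minimal usage sketch covering the whole lifecycle above (cache, my_priv
 * and foo_free_func are hypothetical client names; the xfs_mru_cache_* calls
 * are the ones defined in this file):
 *
 *	struct xfs_mru_cache *cache;
 *	int error;
 *
 *	error = xfs_mru_cache_create(&cache, my_priv, 10000, 5, foo_free_func);
 *	if (error)
 *		return error;
 *
 *	... insert, look up and remove elements ...
 *
 *	xfs_mru_cache_destroy(cache);
 *
 * A ten second lifetime with five groups gives the two second granularity used
 * in the example at the top of this file; xfs_mru_cache_destroy() flushes the
 * cache, so every remaining element has its free function called before the
 * memory is released.
 */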

/*
 * To insert an element, call xfs_mru_cache_insert() with the data store, the
 * element's key and the element to add. This function returns 0 on success
 * or -ENOMEM if memory for the data element couldn't be allocated.
 */
int
xfs_mru_cache_insert(
	struct xfs_mru_cache *mru,
	unsigned long key,
	struct xfs_mru_cache_elem *elem)
{
	int error;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return -EINVAL;

	if (radix_tree_preload(GFP_NOFS))
		return -ENOMEM;

	INIT_LIST_HEAD(&elem->list_node);
	elem->key = key;

	spin_lock(&mru->lock);
	error = radix_tree_insert(&mru->store, key, elem);
	radix_tree_preload_end();
	if (!error)
		_xfs_mru_cache_list_insert(mru, elem);
	spin_unlock(&mru->lock);

	return error;
}
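
/*
 * An illustrative insert pattern (struct foo_item, foo and key are
 * hypothetical, following the sketch after struct xfs_mru_cache above; the
 * error handling is what a client would typically need):
 *
 *	struct foo_item *item;
 *	int error;
 *
 *	item = kzalloc(sizeof(*item), GFP_KERNEL);
 *	if (!item)
 *		return -ENOMEM;
 *	item->foo = foo;
 *
 *	error = xfs_mru_cache_insert(cache, key, &item->mru);
 *	if (error) {
 *		kfree(item);
 *		return error;
 *	}
 *
 * The key is an unsigned long chosen by the client; the cache only uses it as
 * the radix tree index.
 */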

/*
 * To remove an element without calling the free function, call
 * xfs_mru_cache_remove() with the data store and the element's key. On success
 * the removed element is returned, otherwise this function will return a NULL
 * pointer.
 */
struct xfs_mru_cache_elem *
xfs_mru_cache_remove(
	struct xfs_mru_cache *mru,
	unsigned long key)
{
	struct xfs_mru_cache_elem *elem;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return NULL;

	spin_lock(&mru->lock);
	elem = radix_tree_delete(&mru->store, key);
	if (elem)
		list_del(&elem->list_node);
	spin_unlock(&mru->lock);

	return elem;
}

/*
 * To remove an element and call the free function, call xfs_mru_cache_delete()
 * with the data store and the element's key.
 */
void
xfs_mru_cache_delete(
	struct xfs_mru_cache *mru,
	unsigned long key)
{
	struct xfs_mru_cache_elem *elem;

	elem = xfs_mru_cache_remove(mru, key);
	if (elem)
		mru->free_func(mru->data, elem);
}

/*
 * To look up an element using its key, call xfs_mru_cache_lookup() with the
 * data store and the element's key. If found, the element will be moved to the
 * head of the MRU list to indicate that it's been touched.
 *
 * The internal data structures are protected by a spinlock that is STILL HELD
 * when this function returns. Call xfs_mru_cache_done() to release it. Note
 * that it is not safe to call any function that might sleep in the interim.
 *
 * The implementation could have used reference counting to avoid this
 * restriction, but since most clients simply want to get, set or test a member
 * of the returned data structure, the extra per-element memory isn't warranted.
 *
 * If the element isn't found, this function returns NULL and the spinlock is
 * released. xfs_mru_cache_done() should NOT be called when this occurs.
 *
 * Because sparse isn't smart enough to know about conditional lock return
 * status, we need to help it get it right by annotating the path that does
 * not release the lock.
 */
struct xfs_mru_cache_elem *
xfs_mru_cache_lookup(
	struct xfs_mru_cache *mru,
	unsigned long key)
{
	struct xfs_mru_cache_elem *elem;

	ASSERT(mru && mru->lists);
	if (!mru || !mru->lists)
		return NULL;

	spin_lock(&mru->lock);
	elem = radix_tree_lookup(&mru->store, key);
	if (elem) {
		list_del(&elem->list_node);
		_xfs_mru_cache_list_insert(mru, elem);
		__release(mru_lock); /* help sparse not be stupid */
	} else
		spin_unlock(&mru->lock);

	return elem;
}
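
/*
 * A sketch of the lookup/done pairing described above (cache, struct foo_item
 * and use_foo_briefly() are hypothetical client names):
 *
 *	struct xfs_mru_cache_elem *elem;
 *
 *	elem = xfs_mru_cache_lookup(cache, key);
 *	if (elem) {
 *		struct foo_item *item =
 *			container_of(elem, struct foo_item, mru);
 *
 *		use_foo_briefly(item->foo);
 *		xfs_mru_cache_done(cache);
 *	}
 *
 * Nothing between the lookup and xfs_mru_cache_done() may sleep, because
 * mru->lock is still held. No xfs_mru_cache_done() call is made on the NULL
 * path; the lookup has already dropped the lock in that case.
 */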

/*
 * To release the internal data structure spinlock after having performed an
 * xfs_mru_cache_lookup() or an xfs_mru_cache_peek(), call xfs_mru_cache_done()
 * with the data store pointer.
 */
void
xfs_mru_cache_done(
	struct xfs_mru_cache *mru)
		__releases(mru->lock)
{
	spin_unlock(&mru->lock);
}