/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_BTREE_H
#define _BCACHE_BTREE_H

/*
 * THE BTREE:
 *
 * At a high level, bcache's btree is a relatively standard b+ tree. All keys
 * and pointers are in the leaves; interior nodes only have pointers to the
 * child nodes.
 *
 * In the interior nodes, a struct bkey always points to a child btree node,
 * and the key is the highest key in the child node - except that the highest
 * key in an interior node is always MAX_KEY. The size field refers to the
 * size on disk of the child node - this would allow us to have variable sized
 * btree nodes (handy for keeping the depth of the btree at 1 by expanding
 * just the root).
 *
 * Btree nodes are themselves log structured, but this is hidden fairly
 * thoroughly. Btree nodes on disk will in practice have extents that overlap
 * (because they were written at different times), but in memory we never have
 * overlapping extents - when we read in a btree node from disk, the first
 * thing we do is resort all the sets of keys with a mergesort, and in the
 * same pass we check for overlapping extents and adjust them appropriately.
 *
 * struct btree_op is a central interface to the btree code. It's used for
 * specifying read vs. write locking, and the embedded closure is used for
 * waiting on IO or reserving memory.
 *
 * BTREE CACHE:
 *
 * Btree nodes are cached in memory; traversing the btree might require reading
 * in btree nodes which is handled mostly transparently.
 *
 * bch_btree_node_get() looks up a btree node in the cache and reads it in from
 * disk if necessary. This function is almost never called directly though - the
 * bcache_btree() macro is used to get a btree node, call some function on it,
 * and unlock the node after the function returns.
 *
 * The root is special cased - it's taken out of the cache's lru (thus pinning
 * it in memory), so we can find the root of the btree by just dereferencing a
 * pointer instead of looking it up in the cache. This makes locking a bit
 * tricky, since the root pointer is protected by the lock in the btree node it
 * points to - the bcache_btree_root() macro handles this.
 *
 * In various places we must be able to allocate memory for multiple btree
 * nodes in order to make forward progress. To do this we use the btree cache
 * itself as a reserve; if __get_free_pages() fails, we'll find a node in the
 * btree cache we can reuse. We can't allow more than one thread to be doing
 * this at a time, so there's a lock, implemented by a pointer to the btree_op
 * closure - this allows the bcache_btree_root() macro to implicitly release
 * this lock.
 *
 * BTREE IO:
 *
 * Btree nodes never have to be explicitly read in; bch_btree_node_get()
 * handles this.
 *
 * For writing, we have two btree_write structs embedded in struct btree - one
 * write in flight, and one being set up, and we toggle between them.
 *
 * Writing is done with a single function - bch_btree_write() really serves two
 * different purposes and should be broken up into two different functions. When
 * passing now = false, it merely indicates that the node is now dirty - calling
 * it ensures that the dirty keys will be written at some point in the future.
 *
 * When passing now = true, bch_btree_write() causes a write to happen
 * "immediately" (if there was already a write in flight, it'll cause the write
 * to happen as soon as the previous write completes). It returns immediately,
 * but it takes a refcount on the closure in the struct btree_op you passed to
 * it, so a later closure_sync() can be used to wait for the write to complete.
 *
 * This is handy because btree_split() and garbage collection can issue writes
 * in parallel, reducing the amount of time they have to hold write locks.
 *
 * LOCKING:
 *
 * When traversing the btree, we may need write locks starting at some level -
 * inserting a key into the btree will typically only require a write lock on
 * the leaf node.
 *
 * This is specified with the lock field in struct btree_op; lock = 0 means we
 * take write locks at level <= 0, i.e. only leaf nodes. bch_btree_node_get()
 * checks this field and returns the node with the appropriate lock held.
 *
 * If, after traversing the btree, the insertion code discovers it has to split
 * then it must restart from the root and take new locks - to do this it changes
 * the lock field and returns -EINTR, which causes the bcache_btree_root()
 * macro to loop.
 *
 * Handling cache misses requires a different mechanism for upgrading to a
 * write lock. We do cache lookups with only a read lock held, but if we get a
 * cache miss and we wish to insert this data into the cache, we have to insert
 * a placeholder key to detect races - otherwise, we could race with a write
 * and overwrite the data that was just written to the cache with stale data
 * from the backing device.
 *
 * For this we use a sequence number that write locks and unlocks increment -
 * to insert the check key we unlock the btree node, then take a write lock,
 * and fail if the sequence number doesn't match.
 */
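
/*
 * Illustrative sketch of the usual calling pattern (modeled on
 * bch_btree_map_keys() in btree.c; not itself part of the interface): the
 * caller initializes a btree_op with the level at which write locks are
 * needed, then starts a traversal through the bcache_btree_root() macro
 * defined below. -EINTR from the callback restarts the traversal with the
 * updated op->lock.
 *
 *	struct btree_op op;
 *	int ret;
 *
 *	bch_btree_op_init(&op, -1);	// read locks only
 *	ret = bcache_btree_root(map_keys_recurse, c, &op, from, fn, flags);
 */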

#include "bset.h"
#include "debug.h"

struct btree_write {
	atomic_t		*journal;

	/* If btree_split() frees a btree node, it writes a new pointer to that
	 * btree node indicating it was freed; it takes a refcount on
	 * c->prio_blocked because we can't write the gens until the new
	 * pointer is on disk. This allows btree_write_endio() to release the
	 * refcount that btree_split() took.
	 */
	int			prio_blocked;
};

struct btree {
	/* Hottest entries first */
	struct hlist_node	hash;

	/* Key/pointer for this btree node */
	BKEY_PADDED(key);

	unsigned long		seq;
	struct rw_semaphore	lock;
	struct cache_set	*c;
	struct btree		*parent;

	struct mutex		write_lock;

	unsigned long		flags;
	uint16_t		written;	/* would be nice to kill */
	uint8_t			level;

	struct btree_keys	keys;

	/* For outstanding btree writes, used as a lock - protects write_idx */
	struct closure		io;
	struct semaphore	io_mutex;

	struct list_head	list;
	struct delayed_work	work;

	struct btree_write	writes[2];
	struct bio		*bio;
};
#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }

enum btree_flags {
	BTREE_NODE_io_error,
	BTREE_NODE_dirty,
	BTREE_NODE_write_idx,
	BTREE_NODE_journal_flush,
};

BTREE_FLAG(io_error);
BTREE_FLAG(dirty);
BTREE_FLAG(write_idx);
BTREE_FLAG(journal_flush);

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}
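
/*
 * Sketch of the double buffering described at the top of this file (an
 * assumption about how the write path in btree.c uses these helpers):
 * issuing a write flips BTREE_NODE_write_idx, so new dirty keys accumulate
 * in the other btree_write while the previous one is in flight.
 *
 *	struct btree_write *w = btree_current_write(b);
 *	...set up w->journal, w->prio_blocked...
 *	change_bit(BTREE_NODE_write_idx, &b->flags);
 *	// btree_prev_write(b) now returns w, the write in flight
 */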

static inline struct bset *btree_bset_first(struct btree *b)
{
	return b->keys.set->data;
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset_tree_last(&b->keys)->data;
}

static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
{
	return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}

static inline void set_gc_sectors(struct cache_set *c)
{
	atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
}

void bkey_put(struct cache_set *c, struct bkey *k);

/* Looping macros */

#define for_each_cached_btree(b, c, iter)				\
	for (iter = 0;							\
	     iter < ARRAY_SIZE((c)->bucket_hash);			\
	     iter++)							\
		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)

/* Recursing down the btree */

struct btree_op {
	/* for waiting on btree reserve in btree_split() */
	wait_queue_entry_t	wait;

	/* Btree level at which we start taking write locks */
	short			lock;

	unsigned int		insert_collision:1;
};

struct btree_check_state;
struct btree_check_info {
	struct btree_check_state	*state;
	struct task_struct		*thread;
	int				result;
};

#define BCH_BTR_CHKTHREAD_MAX	64
struct btree_check_state {
	struct cache_set		*c;
	int				total_threads;
	int				key_idx;
	spinlock_t			idx_lock;
	atomic_t			started;
	atomic_t			enough;
	wait_queue_head_t		wait;
	struct btree_check_info		infos[BCH_BTR_CHKTHREAD_MAX];
};

static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
{
	memset(op, 0, sizeof(struct btree_op));
	init_wait(&op->wait);
	op->lock = write_lock_level;
}
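
/*
 * Illustrative write_lock_level values (these follow from the level <=
 * op->lock comparison in bcache_btree(); which callers use which value is
 * an assumption, not a statement about specific call sites):
 *
 *	bch_btree_op_init(&op, -1);	  // never take write locks (lookups)
 *	bch_btree_op_init(&op, 0);	  // write lock leaf nodes only
 *	bch_btree_op_init(&op, SHRT_MAX); // write lock every level
 */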

static inline void rw_lock(bool w, struct btree *b, int level)
{
	w ? down_write_nested(&b->lock, level + 1)
	  : down_read_nested(&b->lock, level + 1);
	if (w)
		b->seq++;
}

static inline void rw_unlock(bool w, struct btree *b)
{
	if (w)
		b->seq++;
	(w ? up_write : up_read)(&b->lock);
}
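
/*
 * Sketch of the sequence number race check described at the top of this
 * file (modeled on bch_btree_insert_check_key(); assume b is currently held
 * with a read lock). rw_lock(true, ...) itself increments b->seq, so if no
 * other writer got in between, the new value is exactly seq + 1:
 *
 *	unsigned long seq = b->seq;
 *
 *	rw_unlock(false, b);
 *	rw_lock(true, b, b->level);
 *	if (b->seq != seq + 1)	// another writer intervened
 *		return -EINTR;	// restart the traversal
 */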

void bch_btree_node_read_done(struct btree *b);
void __bch_btree_node_write(struct btree *b, struct closure *parent);
void bch_btree_node_write(struct btree *b, struct closure *parent);

void bch_btree_set_root(struct btree *b);
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent);
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent);

int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key);
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key);

int bch_gc_thread_start(struct cache_set *c);
void bch_initial_gc_finish(struct cache_set *c);
void bch_moving_gc(struct cache_set *c);
int bch_btree_check(struct cache_set *c);
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);

static inline void wake_up_gc(struct cache_set *c)
{
	wake_up(&c->gc_wait);
}

static inline void force_wake_up_gc(struct cache_set *c)
{
	/*
	 * The garbage collection thread only works when sectors_to_gc < 0;
	 * calling wake_up_gc() won't start the gc thread if sectors_to_gc is
	 * not negative. Therefore sectors_to_gc is set to -1 here, before
	 * waking up the gc thread with wake_up_gc(). Note that between
	 * setting the value here and gc_should_run() checking it, another
	 * thread may set c->sectors_to_gc back to a positive value, so this
	 * routine doesn't guarantee that the gc thread will actually run.
	 */
	atomic_set(&c->sectors_to_gc, -1);
	wake_up_gc(c);
}

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; bcache_btree_root()
 * will call you again and you'll have the correct lock.
 */

/**
 * bcache_btree - recurse down the btree on a specified key
 * @fn: function to call, which will be passed the child node
 * @key: key to recurse on
 * @b: parent btree node
 * @op: pointer to struct btree_op
 */
#define bcache_btree(fn, key, b, op, ...)				\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
						  _w, b);		\
	if (!IS_ERR(_child)) {						\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})

/**
 * bcache_btree_root - call a function on the root of the btree
 * @fn: function to call, which will be passed the root node
 * @c: cache set
 * @op: pointer to struct btree_op
 */
#define bcache_btree_root(fn, c, op, ...)				\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -EINTR)					\
			schedule();					\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
	_r;								\
})
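
/*
 * Example of how the two macros compose (a sketch mirroring the pattern in
 * btree.c; the exact call sites there are an assumption): a map operation
 * starts at the root, and the per-node callback recurses on each child key:
 *
 *	return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
 *
 * and, inside bch_btree_map_nodes_recurse(), for each child pointer k:
 *
 *	ret = bcache_btree(map_nodes_recurse, k, b, op, from, fn, flags);
 */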

#define MAP_DONE	0
#define MAP_CONTINUE	1

#define MAP_ALL_NODES	0
#define MAP_LEAF_NODES	1

#define MAP_END_KEY	1

typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b);
int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags);

static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
				      struct bkey *from, btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
}

static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
					   struct cache_set *c,
					   struct bkey *from,
					   btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
}

typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
				struct bkey *k);
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags);
int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
			       struct bkey *from, btree_map_keys_fn *fn,
			       int flags);
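
/*
 * A minimal sketch of a btree_map_keys_fn callback (hypothetical; the real
 * callbacks live in the writeback and request code). Returning MAP_CONTINUE
 * keeps the iteration going; MAP_DONE stops it early:
 *
 *	static int count_dirty_fn(struct btree_op *op, struct btree *b,
 *				  struct bkey *k)
 *	{
 *		if (KEY_DIRTY(k))
 *			...count it...
 *		return MAP_CONTINUE;
 *	}
 *
 *	bch_btree_map_keys(&op, c, &KEY(inode, 0, 0), count_dirty_fn, 0);
 */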

typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);

void bch_keybuf_init(struct keybuf *buf);
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred);
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end);
void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred);
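
/*
 * Typical keybuf consumer loop (a sketch modeled on the moving gc and
 * writeback threads; pred and the work done on w are assumptions):
 *
 *	while (!kthread_should_stop()) {
 *		w = bch_keybuf_next_rescan(c, buf, &MAX_KEY, pred);
 *		if (!w)
 *			break;
 *		...read/write the data w->key points to...
 *		bch_keybuf_del(buf, w);
 *	}
 */
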
void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
#endif