^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Primary bucket allocation code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright 2012 Google, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Allocation in bcache is done in terms of buckets:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * btree pointers - they must match for the pointer to be considered valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * bucket simply by incrementing its gen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * The gens (along with the priorities; it's really the gens are important but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * the code is named as if it's the priorities) are written in an arbitrary list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * of buckets on disk, with a pointer to them in the journal header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * When we invalidate a bucket, we have to write its new gen to disk and wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * for that write to complete before we use it - otherwise after a crash we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * could have pointers that appeared to be good but pointed to data that had
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * been overwritten.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * Since the gens and priorities are all stored contiguously on disk, we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * batch this up: We fill up the free_inc list with freshly invalidated buckets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * call prio_write(), and when prio_write() finishes we pull buckets off the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * free_inc list and optionally discard them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * free_inc isn't the only freelist - if it was, we'd often to sleep while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * priorities and gens were being written before we could allocate. c->free is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * smaller freelist, and buckets on that list are always ready to be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * If we've got discards enabled, that happens when a bucket moves from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * free_inc list to the free list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * There is another freelist, because sometimes we have buckets that we know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * have nothing pointing into them - these we can reuse without waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * priorities to be rewritten. These come from freed btree nodes and buckets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * that garbage collection discovered no longer had valid keys pointing into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * them (because they were overwritten). That's the unused list - buckets on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * unused list move to the free list, optionally being discarded in the process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * It's also important to ensure that gens don't wrap around - with respect to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * either the oldest gen in the btree or the gen on disk. This is quite
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * difficult to do in practice, but we explicitly guard against it anyways - if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * a bucket is in danger of wrapping around we simply skip invalidating it that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * time around, and we garbage collect or rewrite the priorities sooner than we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * would have otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * bch_bucket_alloc() allocates a single bucket from a specific cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * bch_bucket_alloc_set() allocates one bucket from different caches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * out of a cache set.
 *
 * The allocator thread (bch_allocator_thread()) drives all the processes
 * described above. It's woken up from bch_bucket_alloc() and a few other
 * places that need to make sure free buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
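
/*
 * Rough bucket lifecycle, summarizing the above (leaving the unused list
 * aside):
 *
 *   in use -> invalidate_buckets_*() -> free_inc -> prio/gen write ->
 *   (optional discard) -> ca->free[reserve] -> bch_bucket_alloc()
 */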

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
        uint8_t ret = ++b->gen;

        ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
        WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

        return ret;
}
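
/*
 * Note: bucket_gc_gen() is roughly how far b->gen has advanced since garbage
 * collection last looked at this bucket; bumping need_gc here lets the cache
 * set kick off gc before any bucket's gc gen can reach BUCKET_GC_GEN_MAX
 * (see can_inc_bucket_gen() below).
 */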

void bch_rescale_priorities(struct cache_set *c, int sectors)
{
        struct cache *ca;
        struct bucket *b;
        unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
        int r;

        atomic_sub(sectors, &c->rescale);

        do {
                r = atomic_read(&c->rescale);

                if (r >= 0)
                        return;
        } while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

        mutex_lock(&c->bucket_lock);

        c->min_prio = USHRT_MAX;

        ca = c->cache;
        for_each_bucket(b, ca)
                if (b->prio &&
                    b->prio != BTREE_PRIO &&
                    !atomic_read(&b->pin)) {
                        b->prio--;
                        c->min_prio = min(c->min_prio, b->prio);
                }

        mutex_unlock(&c->bucket_lock);
}
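
/*
 * Illustrative numbers for the rescale interval computed above: with
 * nbuckets == 262144 and bucket_size == 1024 sectors, next works out to
 * 262144 * 1024 / 1024 == 262144 sectors, i.e. priorities decay by one step
 * once roughly 1/1024th of the cache's capacity has been accounted through
 * atomic_sub(sectors, &c->rescale).
 */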

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
        return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
        BUG_ON(!ca->set->gc_mark_valid);

        return (!GC_MARK(b) ||
                GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
                !atomic_read(&b->pin) &&
                can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
        lockdep_assert_held(&ca->set->bucket_lock);
        BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

        if (GC_SECTORS_USED(b))
                trace_bcache_invalidate(ca, b - ca->buckets);

        bch_inc_gen(ca, b);
        b->prio = INITIAL_PRIO;
        atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
        __bch_invalidate_one_bucket(ca, b);

        fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale the
 * bucket priorities as well.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)                                                  \
({                                                                      \
        unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
                                                                        \
        (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);  \
})

#define bucket_max_cmp(l, r)    (bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)    (bucket_prio(l) > bucket_prio(r))
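
/*
 * Worked example (illustrative values, assuming min_prio == 0 and
 * INITIAL_PRIO == 32768): the per-live-sector weight ranges from 4096 for
 * the coldest bucket up to 36864 for one at INITIAL_PRIO, so a cold bucket
 * holding 9 live sectors sorts the same as a just-used bucket holding 1.
 * Buckets with the smallest bucket_prio() are invalidated first.
 */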

static void invalidate_buckets_lru(struct cache *ca)
{
        struct bucket *b;
        ssize_t i;

        ca->heap.used = 0;

        for_each_bucket(b, ca) {
                if (!bch_can_invalidate_bucket(ca, b))
                        continue;

                if (!heap_full(&ca->heap))
                        heap_add(&ca->heap, b, bucket_max_cmp);
                else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
                        ca->heap.data[0] = b;
                        heap_sift(&ca->heap, 0, bucket_max_cmp);
                }
        }

        for (i = ca->heap.used / 2 - 1; i >= 0; --i)
                heap_sift(&ca->heap, i, bucket_min_cmp);

        while (!fifo_full(&ca->free_inc)) {
                if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
                        /*
                         * We don't want to be calling invalidate_buckets()
                         * multiple times when it can't do anything
                         */
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }

                bch_invalidate_one_bucket(ca, b);
        }
}
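
/*
 * A note on the two heap passes above: the scan keeps a bounded set of the
 * lowest-priority candidates (replacing the current worst entry whenever a
 * better one is found), and the second pass re-heapifies with bucket_min_cmp
 * so that heap_pop() hands out the smallest bucket_prio() buckets first,
 * until free_inc is full or the heap runs dry.
 */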

static void invalidate_buckets_fifo(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                if (ca->fifo_last_bucket < ca->sb.first_bucket ||
                    ca->fifo_last_bucket >= ca->sb.nbuckets)
                        ca->fifo_last_bucket = ca->sb.first_bucket;

                b = ca->buckets + ca->fifo_last_bucket++;

                if (bch_can_invalidate_bucket(ca, b))
                        bch_invalidate_one_bucket(ca, b);

                if (++checked >= ca->sb.nbuckets) {
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }
        }
}

static void invalidate_buckets_random(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                size_t n;

                get_random_bytes(&n, sizeof(n));

                n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
                n += ca->sb.first_bucket;

                b = ca->buckets + n;

                if (bch_can_invalidate_bucket(ca, b))
                        bch_invalidate_one_bucket(ca, b);

                if (++checked >= ca->sb.nbuckets / 2) {
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }
        }
}

static void invalidate_buckets(struct cache *ca)
{
        BUG_ON(ca->invalidate_needs_gc);

        switch (CACHE_REPLACEMENT(&ca->sb)) {
        case CACHE_REPLACEMENT_LRU:
                invalidate_buckets_lru(ca);
                break;
        case CACHE_REPLACEMENT_FIFO:
                invalidate_buckets_fifo(ca);
                break;
        case CACHE_REPLACEMENT_RANDOM:
                invalidate_buckets_random(ca);
                break;
        }
}

#define allocator_wait(ca, cond)                                        \
do {                                                                    \
        while (1) {                                                     \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (cond)                                               \
                        break;                                          \
                                                                        \
                mutex_unlock(&(ca)->set->bucket_lock);                  \
                if (kthread_should_stop() ||                            \
                    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {  \
                        set_current_state(TASK_RUNNING);                \
                        goto out;                                       \
                }                                                       \
                                                                        \
                schedule();                                             \
                mutex_lock(&(ca)->set->bucket_lock);                    \
        }                                                               \
        __set_current_state(TASK_RUNNING);                              \
} while (0)
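
/*
 * allocator_wait() is only meant to be used from bch_allocator_thread(): it
 * assumes bucket_lock is held, drops it around schedule(), and jumps to the
 * "out" label (terminating the thread) if the kthread is being stopped or
 * CACHE_SET_IO_DISABLE has been set.
 */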

static int bch_allocator_push(struct cache *ca, long bucket)
{
        unsigned int i;

        /* Prios/gens are actually the most important reserve */
        if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
                return true;

        for (i = 0; i < RESERVE_NR; i++)
                if (fifo_push(&ca->free[i], bucket))
                        return true;

        return false;
}

static int bch_allocator_thread(void *arg)
{
        struct cache *ca = arg;

        mutex_lock(&ca->set->bucket_lock);

        while (1) {
                /*
                 * First, we pull buckets off of the unused and free_inc lists,
                 * possibly issue discards to them, then we add the bucket to
                 * the free list:
                 */
                while (1) {
                        long bucket;

                        if (!fifo_pop(&ca->free_inc, bucket))
                                break;

                        if (ca->discard) {
                                mutex_unlock(&ca->set->bucket_lock);
                                blkdev_issue_discard(ca->bdev,
                                        bucket_to_sector(ca->set, bucket),
                                        ca->sb.bucket_size, GFP_KERNEL, 0);
                                mutex_lock(&ca->set->bucket_lock);
                        }

                        allocator_wait(ca, bch_allocator_push(ca, bucket));
                        wake_up(&ca->set->btree_cache_wait);
                        wake_up(&ca->set->bucket_wait);
                }

                /*
                 * We've run out of free buckets, we need to find some buckets
                 * we can invalidate. First, invalidate them in memory and add
                 * them to the free_inc list:
                 */

retry_invalidate:
                allocator_wait(ca, ca->set->gc_mark_valid &&
                               !ca->invalidate_needs_gc);
                invalidate_buckets(ca);

                /*
                 * Now, we write their new gens to disk so we can start writing
                 * new stuff to them:
                 */
                allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
                if (CACHE_SYNC(&ca->sb)) {
                        /*
                         * This could deadlock if an allocation with a btree
                         * node locked ever blocked - having the btree node
                         * locked would block garbage collection, but here we're
                         * waiting on garbage collection before we invalidate
                         * and free anything.
                         *
                         * But this should be safe since the btree code always
                         * uses btree_check_reserve() before allocating now, and
                         * if it fails it blocks without btree nodes locked.
                         */
                        if (!fifo_full(&ca->free_inc))
                                goto retry_invalidate;

                        if (bch_prio_write(ca, false) < 0) {
                                ca->invalidate_needs_gc = 1;
                                wake_up_gc(ca->set);
                        }
                }
        }
out:
        wait_for_kthread_stop();
        return 0;
}

/* Allocation */

long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
        DEFINE_WAIT(w);
        struct bucket *b;
        long r;

        /* No allocation if CACHE_SET_IO_DISABLE bit is set */
        if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
                return -1;

        /* fastpath */
        if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
            fifo_pop(&ca->free[reserve], r))
                goto out;

        if (!wait) {
                trace_bcache_alloc_fail(ca, reserve);
                return -1;
        }

        do {
                prepare_to_wait(&ca->set->bucket_wait, &w,
                                TASK_UNINTERRUPTIBLE);

                mutex_unlock(&ca->set->bucket_lock);
                schedule();
                mutex_lock(&ca->set->bucket_lock);
        } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
                 !fifo_pop(&ca->free[reserve], r));

        finish_wait(&ca->set->bucket_wait, &w);
out:
        if (ca->alloc_thread)
                wake_up_process(ca->alloc_thread);

        trace_bcache_alloc(ca, reserve);

        if (expensive_debug_checks(ca->set)) {
                size_t iter;
                long i;
                unsigned int j;

                for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
                        BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

                for (j = 0; j < RESERVE_NR; j++)
                        fifo_for_each(i, &ca->free[j], iter)
                                BUG_ON(i == r);
                fifo_for_each(i, &ca->free_inc, iter)
                        BUG_ON(i == r);
        }

        b = ca->buckets + r;

        BUG_ON(atomic_read(&b->pin) != 1);

        SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

        if (reserve <= RESERVE_PRIO) {
                SET_GC_MARK(b, GC_MARK_METADATA);
                SET_GC_MOVE(b, 0);
                b->prio = BTREE_PRIO;
        } else {
                SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
                SET_GC_MOVE(b, 0);
                b->prio = INITIAL_PRIO;
        }

        if (ca->set->avail_nbuckets > 0) {
                ca->set->avail_nbuckets--;
                bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
        }

        return r;
}
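
/*
 * Minimal usage sketch (bucket_lock must be held; see
 * __bch_bucket_alloc_set() below for a real caller):
 *
 *      mutex_lock(&ca->set->bucket_lock);
 *      r = bch_bucket_alloc(ca, RESERVE_NONE, true);
 *      mutex_unlock(&ca->set->bucket_lock);
 *      if (r < 0)
 *              return -1;
 *
 * The failure path is taken when IO is disabled, or when wait == false and
 * both freelists are empty. On success r is a bucket index whose pin count
 * is 1 and whose prio and gc mark have been set up as above.
 */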

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
        SET_GC_MARK(b, 0);
        SET_GC_SECTORS_USED(b, 0);

        if (ca->set->avail_nbuckets < ca->set->nbuckets) {
                ca->set->avail_nbuckets++;
                bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
        }
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
        unsigned int i;

        for (i = 0; i < KEY_PTRS(k); i++)
                __bch_bucket_free(PTR_CACHE(c, k, i),
                                  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
                           struct bkey *k, bool wait)
{
        struct cache *ca;
        long b;

        /* No allocation if CACHE_SET_IO_DISABLE bit is set */
        if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
                return -1;

        lockdep_assert_held(&c->bucket_lock);

        bkey_init(k);

        ca = c->cache;
        b = bch_bucket_alloc(ca, reserve, wait);
        if (b == -1)
                goto err;

        k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
                             bucket_to_sector(c, b),
                             ca->sb.nr_this_dev);

        SET_KEY_PTRS(k, 1);

        return 0;
err:
        bch_bucket_free(c, k);
        bkey_put(c, k);
        return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
                         struct bkey *k, bool wait)
{
        int ret;

        mutex_lock(&c->bucket_lock);
        ret = __bch_bucket_alloc_set(c, reserve, k, wait);
        mutex_unlock(&c->bucket_lock);
        return ret;
}

/* Sector allocator */

struct open_bucket {
        struct list_head        list;
        unsigned int            last_write_point;
        unsigned int            sectors_free;
        BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable; if
 * they get mixed into a bucket with dirty sectors from a cached device, that
 * bucket stays marked dirty and won't be reclaimed, even after the cached
 * device's dirty data has been written back to the backing device.
 *
 * And say you're starting Firefox at the same time as you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
                                            const struct bkey *search,
                                            unsigned int write_point,
                                            struct bkey *alloc)
{
        struct open_bucket *ret, *ret_task = NULL;

        list_for_each_entry_reverse(ret, &c->data_buckets, list)
                if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
                    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
                        continue;
                else if (!bkey_cmp(&ret->key, search))
                        goto found;
                else if (ret->last_write_point == write_point)
                        ret_task = ret;

        ret = ret_task ?: list_first_entry(&c->data_buckets,
                                           struct open_bucket, list);
found:
        if (!ret->sectors_free && KEY_PTRS(alloc)) {
                ret->sectors_free = c->cache->sb.bucket_size;
                bkey_copy(&ret->key, alloc);
                bkey_init(alloc);
        }

        if (!ret->sectors_free)
                ret = NULL;

        return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (so that
 * KEY_OFFSET(k) points to the end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true (as it is on the writeback path), this will not fail
 * unless the cache set is shutting down (CACHE_SET_IO_DISABLE).
 */
bool bch_alloc_sectors(struct cache_set *c,
                       struct bkey *k,
                       unsigned int sectors,
                       unsigned int write_point,
                       unsigned int write_prio,
                       bool wait)
{
        struct open_bucket *b;
        BKEY_PADDED(key) alloc;
        unsigned int i;

        /*
         * We might have to allocate a new bucket, which we can't do with a
         * spinlock held. So if we have to allocate, we drop the lock, allocate
         * and then retry. KEY_PTRS() indicates whether alloc points to
         * allocated bucket(s).
         */

        bkey_init(&alloc.key);
        spin_lock(&c->data_bucket_lock);

        while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
                unsigned int watermark = write_prio
                        ? RESERVE_MOVINGGC
                        : RESERVE_NONE;

                spin_unlock(&c->data_bucket_lock);

                if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
                        return false;

                spin_lock(&c->data_bucket_lock);
        }

        /*
         * If we had to allocate, we might race and not need to allocate the
         * second time we call pick_data_bucket(). If we allocated a bucket but
         * didn't use it, drop the refcount bch_bucket_alloc_set() took:
         */
        if (KEY_PTRS(&alloc.key))
                bkey_put(c, &alloc.key);

        for (i = 0; i < KEY_PTRS(&b->key); i++)
                EBUG_ON(ptr_stale(c, &b->key, i));

        /* Set up the pointer to the space we're allocating: */

        for (i = 0; i < KEY_PTRS(&b->key); i++)
                k->ptr[i] = b->key.ptr[i];

        sectors = min(sectors, b->sectors_free);

        SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
        SET_KEY_SIZE(k, sectors);
        SET_KEY_PTRS(k, KEY_PTRS(&b->key));

        /*
         * Move b to the end of the lru, and keep track of what this bucket was
         * last used for:
         */
        list_move_tail(&b->list, &c->data_buckets);
        bkey_copy_key(&b->key, k);
        b->last_write_point = write_point;

        b->sectors_free -= sectors;

        for (i = 0; i < KEY_PTRS(&b->key); i++) {
                SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

                atomic_long_add(sectors,
                                &PTR_CACHE(c, &b->key, i)->sectors_written);
        }

        if (b->sectors_free < c->cache->sb.block_size)
                b->sectors_free = 0;

        /*
         * k takes refcounts on the buckets it points to until it's inserted
         * into the btree, but if we're done with this bucket we just transfer
         * the open bucket's refcount (taken when the bucket was allocated):
         */
        if (b->sectors_free)
                for (i = 0; i < KEY_PTRS(&b->key); i++)
                        atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

        spin_unlock(&c->data_bucket_lock);
        return true;
}
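
/*
 * Usage sketch (illustrative, not copied from any caller; on entry
 * KEY_INODE(k)/KEY_OFFSET(k) describe the write being placed, and the other
 * arguments are the caller's choice):
 *
 *      if (!bch_alloc_sectors(c, k, sectors, write_point, write_prio, wait))
 *              return false;
 *
 * Failure is only possible when wait == false or the cache set is going
 * away. On success KEY_SIZE(k) (<= sectors) is how much was actually
 * allocated, KEY_OFFSET(k) is the end of that space, and k's pointers
 * reference the chosen open bucket.
 */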

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
        struct open_bucket *b;

        while (!list_empty(&c->data_buckets)) {
                b = list_first_entry(&c->data_buckets,
                                     struct open_bucket, list);
                list_del(&b->list);
                kfree(b);
        }
}

int bch_open_buckets_alloc(struct cache_set *c)
{
        int i;

        spin_lock_init(&c->data_bucket_lock);

        for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
                struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

                if (!b)
                        return -ENOMEM;

                list_add(&b->list, &c->data_buckets);
        }

        return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
        struct task_struct *k = kthread_run(bch_allocator_thread,
                                            ca, "bcache_allocator");
        if (IS_ERR(k))
                return PTR_ERR(k);

        ca->alloc_thread = k;
        return 0;
}